logging
Logging package for Python. Based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
1# Copyright 2001-2022 by Vinay Sajip. All Rights Reserved. 2# 3# Permission to use, copy, modify, and distribute this software and its 4# documentation for any purpose and without fee is hereby granted, 5# provided that the above copyright notice appear in all copies and that 6# both that copyright notice and this permission notice appear in 7# supporting documentation, and that the name of Vinay Sajip 8# not be used in advertising or publicity pertaining to distribution 9# of the software without specific, written prior permission. 10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING 11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL 12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR 13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER 14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 17""" 18Logging package for Python. Based on PEP 282 and comments thereto in 19comp.lang.python. 20 21Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved. 22 23To use, simply 'import logging' and log away! 
24""" 25 26import sys, os, time, io, re, traceback, warnings, weakref, collections.abc 27 28from types import GenericAlias 29from string import Template 30from string import Formatter as StrFormatter 31 32 33__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR', 34 'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO', 35 'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler', 36 'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig', 37 'captureWarnings', 'critical', 'debug', 'disable', 'error', 38 'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass', 39 'info', 'log', 'makeLogRecord', 'setLoggerClass', 'shutdown', 40 'warn', 'warning', 'getLogRecordFactory', 'setLogRecordFactory', 41 'lastResort', 'raiseExceptions', 'getLevelNamesMapping', 42 'getHandlerByName', 'getHandlerNames'] 43 44import threading 45 46__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>" 47__status__ = "production" 48# The following module attributes are no longer updated. 
# Legacy metadata, kept for backward compatibility only (see comment above:
# these attributes are no longer updated).
__version__ = "0.5.1.2"
__date__    = "07 February 2010"

#---------------------------------------------------------------------------
#   Miscellaneous module data
#---------------------------------------------------------------------------

#
#_startTime is used as the base when calculating the relative time of events
# (integer nanoseconds since the epoch, captured at module import; consumed
# by LogRecord to compute relativeCreated).
#
_startTime = time.time_ns()

#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
# (when False, errors raised while emitting a record are silently ignored
# instead of being reported via Handler.handleError / stderr).
#
raiseExceptions = True

#
# If you don't want threading information in the log, set this to False
# (LogRecord.thread / threadName become None).
#
logThreads = True

#
# If you don't want multiprocessing information in the log, set this to False
# (LogRecord.processName becomes None).
#
logMultiprocessing = True

#
# If you don't want process information in the log, set this to False
# (LogRecord.process becomes None).
#
logProcesses = True

#
# If you don't want asyncio task information in the log, set this to False
# (LogRecord.taskName stays None).
#
logAsyncioTasks = True

#---------------------------------------------------------------------------
#   Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
96# 97 98CRITICAL = 50 99FATAL = CRITICAL 100ERROR = 40 101WARNING = 30 102WARN = WARNING 103INFO = 20 104DEBUG = 10 105NOTSET = 0 106 107_levelToName = { 108 CRITICAL: 'CRITICAL', 109 ERROR: 'ERROR', 110 WARNING: 'WARNING', 111 INFO: 'INFO', 112 DEBUG: 'DEBUG', 113 NOTSET: 'NOTSET', 114} 115_nameToLevel = { 116 'CRITICAL': CRITICAL, 117 'FATAL': FATAL, 118 'ERROR': ERROR, 119 'WARN': WARNING, 120 'WARNING': WARNING, 121 'INFO': INFO, 122 'DEBUG': DEBUG, 123 'NOTSET': NOTSET, 124} 125 126def getLevelNamesMapping(): 127 return _nameToLevel.copy() 128 129def getLevelName(level): 130 """ 131 Return the textual or numeric representation of logging level 'level'. 132 133 If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, 134 INFO, DEBUG) then you get the corresponding string. If you have 135 associated levels with names using addLevelName then the name you have 136 associated with 'level' is returned. 137 138 If a numeric value corresponding to one of the defined levels is passed 139 in, the corresponding string representation is returned. 140 141 If a string representation of the level is passed in, the corresponding 142 numeric value is returned. 143 144 If no matching numeric or string value is passed in, the string 145 'Level %s' % level is returned. 146 """ 147 # See Issues #22386, #27937 and #29220 for why it's this way 148 result = _levelToName.get(level) 149 if result is not None: 150 return result 151 result = _nameToLevel.get(level) 152 if result is not None: 153 return result 154 return "Level %s" % level 155 156def addLevelName(level, levelName): 157 """ 158 Associate 'levelName' with 'level'. 159 160 This is used when converting levels to text during message formatting. 
161 """ 162 with _lock: 163 _levelToName[level] = levelName 164 _nameToLevel[levelName] = level 165 166if hasattr(sys, "_getframe"): 167 currentframe = lambda: sys._getframe(1) 168else: #pragma: no cover 169 def currentframe(): 170 """Return the frame object for the caller's stack frame.""" 171 try: 172 raise Exception 173 except Exception as exc: 174 return exc.__traceback__.tb_frame.f_back 175 176# 177# _srcfile is used when walking the stack to check when we've got the first 178# caller stack frame, by skipping frames whose filename is that of this 179# module's source. It therefore should contain the filename of this module's 180# source file. 181# 182# Ordinarily we would use __file__ for this, but frozen modules don't always 183# have __file__ set, for some reason (see Issue #21736). Thus, we get the 184# filename from a handy code object from a function defined in this module. 185# (There's no particular reason for picking addLevelName.) 186# 187 188_srcfile = os.path.normcase(addLevelName.__code__.co_filename) 189 190# _srcfile is only used in conjunction with sys._getframe(). 191# Setting _srcfile to None will prevent findCaller() from being called. This 192# way, you can avoid the overhead of fetching caller information. 193 194# The following is based on warnings._is_internal_frame. It makes sure that 195# frames of the import mechanism are skipped when logging at module level and 196# using a stacklevel value greater than one. 
def _is_internal_frame(frame):
    """Signal whether the frame is a CPython or logging module internal."""
    filename = os.path.normcase(frame.f_code.co_filename)
    # A frame is "internal" if it belongs to this module, or to the import
    # machinery (importlib._bootstrap*), which must be skipped when walking
    # the stack with stacklevel > 1.
    return filename == _srcfile or (
        "importlib" in filename and "_bootstrap" in filename
    )


def _checkLevel(level):
    """Normalize 'level' to an int.

    Ints pass through unchanged; known level names (keys of _nameToLevel)
    are translated; anything else raises ValueError (unknown name) or
    TypeError (wrong type).
    """
    if isinstance(level, int):
        rv = level
    elif str(level) == level:
        # 'level' is a string (str(x) == x only holds for strings).
        if level not in _nameToLevel:
            raise ValueError("Unknown level: %r" % level)
        rv = _nameToLevel[level]
    else:
        raise TypeError("Level not an integer or a valid string: %r"
                        % (level,))
    return rv

#---------------------------------------------------------------------------
#   Thread-related stuff
#---------------------------------------------------------------------------

#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
_lock = threading.RLock()

def _prepareFork():
    """
    Prepare to fork a new child process by acquiring the module-level lock.

    This should be used in conjunction with _afterFork().
    """
    # Wrap the lock acquisition in a try-except to prevent the lock from being
    # abandoned in the event of an asynchronous exception. See gh-106238.
    try:
        _lock.acquire()
    except BaseException:
        _lock.release()
        raise

def _afterFork():
    """
    After a new child process has been forked, release the module-level lock.

    This should be used in conjunction with _prepareFork().
    """
    _lock.release()


# Prevent a held logging lock from blocking a child from logging.
if not hasattr(os, 'register_at_fork'):  # Windows and friends.
    def _register_at_fork_reinit_lock(instance):
        pass  # no-op when os.register_at_fork does not exist.
else:
    # A collection of instances with a _at_fork_reinit method (logging.Handler)
    # to be called in the child after forking.  The weakref avoids us keeping
    # discarded Handler instances alive.
    _at_fork_reinit_lock_weakset = weakref.WeakSet()

    def _register_at_fork_reinit_lock(instance):
        # Register a handler-like object whose lock must be reinitialized
        # in a forked child.
        with _lock:
            _at_fork_reinit_lock_weakset.add(instance)

    def _after_at_fork_child_reinit_locks():
        # Runs in the child process: give every registered handler a fresh,
        # unlocked lock, then reset the module lock itself.
        for handler in _at_fork_reinit_lock_weakset:
            handler._at_fork_reinit()

        # _prepareFork() was called in the parent before forking.
        # The lock is reinitialized to unlocked state.
        _lock._at_fork_reinit()

    os.register_at_fork(before=_prepareFork,
                        after_in_child=_after_at_fork_child_reinit_locks,
                        after_in_parent=_afterFork)


#---------------------------------------------------------------------------
#   The logging record
#---------------------------------------------------------------------------

class LogRecord(object):
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None, sinfo=None, **kwargs):
        """
        Initialize a logging record with interesting information.

        name: logger name; level: numeric level; pathname/lineno/func:
        source location of the logging call; msg/args: user message and
        %-style arguments; exc_info: sys.exc_info()-style tuple or None;
        sinfo: stack info text or None.  Extra **kwargs are accepted (and
        ignored here) so subclass factories can pass additional data.
        """
        ct = time.time_ns()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        #       logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warning('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        # Issue #21172: a request was made to relax the isinstance check
        # to hasattr(args[0], '__getitem__'). However, the docs on string
        # formatting still seem to suggest a mapping object is required.
        # Thus, while not removing the isinstance check, it does now look
        # for collections.abc.Mapping rather than, as before, dict.
        if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping)
            and args[0]):
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            # pathname may be anything a caller passed in; degrade gracefully.
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None      # used to cache the traceback text
        self.stack_info = sinfo
        self.lineno = lineno
        self.funcName = func
        self.created = ct / 1e9  # ns to float seconds
        # Get the number of whole milliseconds (0-999) in the fractional part of seconds.
        # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns --> 999 ms
        # Convert to float by adding 0.0 for historical reasons. See gh-89047
        self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0
        if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000:
            # ns -> sec conversion can round up, e.g:
            # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec
            self.msecs = 0.0

        self.relativeCreated = (ct - _startTime) / 1e6
        if logThreads:
            self.thread = threading.get_ident()
            self.threadName = threading.current_thread().name
        else: # pragma: no cover
            self.thread = None
            self.threadName = None
        if not logMultiprocessing: # pragma: no cover
            self.processName = None
        else:
            self.processName = 'MainProcess'
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except Exception: #pragma: no cover
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None

        self.taskName = None
        if logAsyncioTasks:
            # Only consult asyncio if it is already imported; never import it.
            asyncio = sys.modules.get('asyncio')
            if asyncio:
                try:
                    # current_task() returns None outside a task; the
                    # AttributeError that follows is swallowed deliberately.
                    self.taskName = asyncio.current_task().get_name()
                except Exception:
                    pass

    def __repr__(self):
        # Debug-friendly representation; shows the raw (unformatted) msg.
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)

    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        msg = str(self.msg)
        if self.args:
            msg = msg % self.args
        return msg

#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord

def setLogRecordFactory(factory):
    """
    Set the factory to be used when instantiating a log record.

    :param factory: A callable which will be called to instantiate
    a log record.
    """
    global _logRecordFactory
    _logRecordFactory = factory

def getLogRecordFactory():
    """
    Return the factory to be used when instantiating a log record.
    """

    return _logRecordFactory

def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary,
    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    # NOTE: the parameter name shadows the builtin 'dict'; kept for
    # backward compatibility of the documented signature.
    rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
    rv.__dict__.update(dict)
    return rv


#---------------------------------------------------------------------------
#   Formatter classes and functions
#---------------------------------------------------------------------------
_str_formatter = StrFormatter()
del StrFormatter


class PercentStyle(object):
    # Base class for the three format styles; subclasses override the class
    # attributes below and _format()/validate() as needed.

    default_format = '%(message)s'
    asctime_format = '%(asctime)s'
    asctime_search = '%(asctime)'
    validation_pattern = re.compile(r'%\(\w+\)[#0+ -]*(\*|\d+)?(\.(\*|\d+))?[diouxefgcrsa%]', re.I)

    def __init__(self, fmt, *, defaults=None):
        # 'defaults' supplies fallback values for fields missing from the
        # record's __dict__.
        self._fmt = fmt or self.default_format
        self._defaults = defaults

    def usesTime(self):
        # True if the format string references asctime (so the Formatter
        # knows whether formatTime() must be called).
        return self._fmt.find(self.asctime_search) >= 0

    def validate(self):
        """Validate the input format, ensure it matches the correct style"""
        if not self.validation_pattern.search(self._fmt):
            raise ValueError("Invalid format '%s' for '%s' style" % (self._fmt, self.default_format[0]))

    def _format(self, record):
        if defaults := self._defaults:
            # Record attributes take precedence over defaults.
            values = defaults | record.__dict__
        else:
            values = record.__dict__
        return self._fmt % values

    def format(self, record):
        try:
            return self._format(record)
        except KeyError as e:
            # NOTE(review): the KeyError is not chained ('from e');
            # this matches long-standing upstream behaviour.
            raise ValueError('Formatting field not found in record: %s' % e)


class StrFormatStyle(PercentStyle):
    # str.format() ('{') style.
    default_format = '{message}'
    asctime_format = '{asctime}'
    asctime_search = '{asctime'

    # A simplified subset of the str.format mini-language, used by validate().
    fmt_spec = re.compile(r'^(.?[<>=^])?[+ -]?#?0?(\d+|{\w+})?[,_]?(\.(\d+|{\w+}))?[bcdefgnosx%]?$', re.I)
    field_spec = re.compile(r'^(\d+|\w+)(\.\w+|\[[^]]+\])*$')

    def _format(self, record):
        if defaults := self._defaults:
            values = defaults | record.__dict__
        else:
            values = record.__dict__
        return self._fmt.format(**values)

    def validate(self):
        """Validate the input format, ensure it is the correct string formatting style"""
        fields = set()
        try:
            for _, fieldname, spec, conversion in _str_formatter.parse(self._fmt):
                if fieldname:
                    if not self.field_spec.match(fieldname):
                        raise ValueError('invalid field name/expression: %r' % fieldname)
                    fields.add(fieldname)
                if conversion and conversion not in 'rsa':
                    raise ValueError('invalid conversion: %r' % conversion)
                if spec and not self.fmt_spec.match(spec):
                    raise ValueError('bad specifier: %r' % spec)
        except ValueError as e:
            raise ValueError('invalid format: %s' % e)
        if not fields:
            raise ValueError('invalid format: no fields')


class StringTemplateStyle(PercentStyle):
    # string.Template ('$') style.
    default_format = '${message}'
    asctime_format = '${asctime}'
    asctime_search = '${asctime}'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pre-compile the template once; reused for every record.
        self._tpl = Template(self._fmt)

    def usesTime(self):
        fmt = self._fmt
        # Accept both '$asctime' and '${asctime}' spellings.
        return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_search) >= 0

    def validate(self):
        pattern = Template.pattern
        fields = set()
        for m in pattern.finditer(self._fmt):
            d = m.groupdict()
            if d['named']:
                fields.add(d['named'])
            elif d['braced']:
                fields.add(d['braced'])
            elif m.group(0) == '$':
                # A '$' matched neither a field nor the '$$' escape.
                raise ValueError('invalid format: bare \'$\' not allowed')
        if not fields:
            raise ValueError('invalid format: no fields')

    def _format(self, record):
        if defaults := self._defaults:
            values = defaults | record.__dict__
        else:
            values = record.__dict__
        return self._tpl.substitute(**values)


BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"

# Style character -> (style class, default format for basicConfig).
_STYLES = {
    '%': (PercentStyle, BASIC_FORMAT),
    '{': (StrFormatStyle, '{levelname}:{name}:{message}'),
    '$': (StringTemplateStyle, '${levelname}:${name}:${message}'),
}

class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    style-dependent default value, "%(message)s", "{message}", or
    "${message}", is used.

    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:

    %(name)s            Name of the logger (logging channel)
    %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                        WARNING, ERROR, CRITICAL)
    %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                        "WARNING", "ERROR", "CRITICAL")
    %(pathname)s        Full pathname of the source file where the logging
                        call was issued (if available)
    %(filename)s        Filename portion of pathname
    %(module)s          Module (name portion of filename)
    %(lineno)d          Source line number where the logging call was issued
                        (if available)
    %(funcName)s        Function name
    %(created)f         Time when the LogRecord was created (time.time_ns() / 1e9
                        return value)
    %(asctime)s         Textual time when the LogRecord was created
    %(msecs)d           Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                        relative to the time the logging module was loaded
                        (typically at application startup time)
    %(thread)d          Thread ID (if available)
    %(threadName)s      Thread name (if available)
    %(taskName)s        Task name (if available)
    %(process)d         Process ID (if available)
    %(message)s         The result of record.getMessage(), computed just as
                        the record is emitted
    """

    converter = time.localtime

    def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *,
                 defaults=None):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument. If datefmt is omitted, you get an
        ISO8601-like (or RFC 3339-like) format.

        Use a style parameter of '%', '{' or '$' to specify that you want to
        use one of %-formatting, :meth:`str.format` (``{}``) formatting or
        :class:`string.Template` formatting in your format string.

        .. versionchanged:: 3.2
           Added the ``style`` parameter.
        """
        if style not in _STYLES:
            raise ValueError('Style must be one of: %s' % ','.join(
                             _STYLES.keys()))
        self._style = _STYLES[style][0](fmt, defaults=defaults)
        if validate:
            self._style.validate()

        self._fmt = self._style._fmt
        self.datefmt = datefmt

    default_time_format = '%Y-%m-%d %H:%M:%S'
    default_msec_format = '%s,%03d'

    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used.
        The resulting string is returned. This function uses a user-configurable
        function to convert the creation time to a tuple. By default,
        time.localtime() is used; to change this for a particular formatter
        instance, set the 'converter' attribute to a function with the same
        signature as time.localtime() or time.gmtime(). To change it for all
        formatters, for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            s = time.strftime(self.default_time_format, ct)
            if self.default_msec_format:
                # Append milliseconds only for the default format.
                s = self.default_msec_format % (s, record.msecs)
        return s

    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.

        This default implementation just uses
        traceback.print_exception()
        """
        sio = io.StringIO()
        tb = ei[2]
        # See issues #9427, #1553375. Commented out for now.
        #if getattr(self, 'fullstack', False):
        #    traceback.print_stack(tb.tb_frame.f_back, file=sio)
        traceback.print_exception(ei[0], ei[1], tb, limit=None, file=sio)
        s = sio.getvalue()
        sio.close()
        if s[-1:] == "\n":
            # Strip the single trailing newline traceback output ends with.
            s = s[:-1]
        return s

    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._style.usesTime()

    def formatMessage(self, record):
        # Delegate the actual string interpolation to the style object.
        return self._style.format(record)

    def formatStack(self, stack_info):
        """
        This method is provided as an extension point for specialized
        formatting of stack information.

        The input data is a string as returned from a call to
        :func:`traceback.print_stack`, but with the last trailing newline
        removed.

        The base implementation just returns the value passed in.
        """
        return stack_info

    def format(self, record):
        """
        Format the specified record as text.

        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime(), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self.formatMessage(record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + record.exc_text
        if record.stack_info:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + self.formatStack(record.stack_info)
        return s

#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()

class BufferingFormatter(object):
    """
    A formatter suitable for formatting a number of records.
    """
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record.
        """
        if linefmt:
            self.linefmt = linefmt
        else:
            self.linefmt = _defaultFormatter

    def formatHeader(self, records):
        """
        Return the header string for the specified records.
        """
        return ""

    def formatFooter(self, records):
        """
        Return the footer string for the specified records.
        """
        return ""

    def format(self, records):
        """
        Format the specified records and return the result as a string.
        """
        rv = ""
        if len(records) > 0:
            rv = rv + self.formatHeader(records)
            for record in records:
                rv = rv + self.linefmt.format(record)
            rv = rv + self.formatFooter(records)
        return rv

#---------------------------------------------------------------------------
#   Filter classes and functions
#---------------------------------------------------------------------------

class Filter(object):
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.

    Loggers and Handlers can optionally use Filter instances to filter
    records as desired. The base filter class only allows events which are
    below a certain point in the logger hierarchy. For example, a filter
    initialized with "A.B" will allow events logged by loggers "A.B",
    "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
    initialized with the empty string, all events are passed.
    """
    def __init__(self, name=''):
        """
        Initialize a filter.

        Initialize with the name of the logger which, together with its
        children, will have its events allowed through the filter. If no
        name is specified, allow every event.
        """
        self.name = name
        self.nlen = len(name)  # cached for the prefix test in filter()

    def filter(self, record):
        """
        Determine if the specified record is to be logged.

        Returns True if the record should be logged, or False otherwise.
        If deemed appropriate, the record may be modified in-place.
        """
        if self.nlen == 0:
            return True
        elif self.name == record.name:
            return True
        elif record.name.find(self.name, 0, self.nlen) != 0:
            # record.name does not start with self.name.
            return False
        # record.name starts with self.name: accept only a true child
        # logger, i.e. the next character must be the hierarchy separator.
        return (record.name[self.nlen] == ".")

class Filterer(object):
    """
    A base class for loggers and handlers which allows them to share
    common code.
    """
    def __init__(self):
        """
        Initialize the list of filters to be an empty list.
        """
        self.filters = []

    def addFilter(self, filter):
        """
        Add the specified filter to this handler.
        """
        if not (filter in self.filters):
            self.filters.append(filter)

    def removeFilter(self, filter):
        """
        Remove the specified filter from this handler.
        """
        if filter in self.filters:
            self.filters.remove(filter)

    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.

        The default is to allow the record to be logged; any filter can veto
        this by returning a false value.
        If a filter attached to a handler returns a log record instance,
        then that instance is used in place of the original log record in
        any further processing of the event by that handler.
        If a filter returns any other true value, the original log record
        is used in any further processing of the event by that handler.

        If none of the filters return false values, this method returns
        a log record.
        If any of the filters return a false value, this method returns
        a false value.

        .. versionchanged:: 3.2

           Allow filters to be just callables.

        .. versionchanged:: 3.12
           Allow filters to return a LogRecord instead of
           modifying it in place.
        """
        for f in self.filters:
            if hasattr(f, 'filter'):
                result = f.filter(record)
            else:
                result = f(record) # assume callable - will raise if not
            if not result:
                return False
            if isinstance(result, LogRecord):
                # A filter may substitute a replacement record.
                record = result
        return record

#---------------------------------------------------------------------------
#   Handler classes and functions
#---------------------------------------------------------------------------

_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized

def _removeHandlerRef(wr):
    """
    Remove a handler reference from the internal cleanup list.
    """
    # This function can be called during module teardown, when globals are
    # set to None. It can also be called from another thread. So we need to
    # pre-emptively grab the necessary globals and check if they're None,
    # to prevent race conditions and failures during interpreter shutdown.
#---------------------------------------------------------------------------
#   Handler classes and functions
#---------------------------------------------------------------------------

_handlers = weakref.WeakValueDictionary()  #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized

def _removeHandlerRef(wr):
    """
    Remove a handler reference from the internal cleanup list.
    """
    # This function can be called during module teardown, when globals are
    # set to None. It can also be called from another thread. So we need to
    # pre-emptively grab the necessary globals and check if they're None,
    # to prevent race conditions and failures during interpreter shutdown.
    handlers, lock = _handlerList, _lock
    if lock and handlers:
        with lock:
            try:
                handlers.remove(wr)
            except ValueError:
                pass

def _addHandlerRef(handler):
    """
    Add a handler to the internal cleanup list using a weak reference.
    """
    with _lock:
        _handlerList.append(weakref.ref(handler, _removeHandlerRef))


def getHandlerByName(name):
    """
    Get a handler with the specified *name*, or None if there isn't one with
    that name.
    """
    return _handlers.get(name)


def getHandlerNames():
    """
    Return all known handler names as an immutable set.
    """
    return frozenset(_handlers)


class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.
        """
        Filterer.__init__(self)
        self._name = None
        self.level = _checkLevel(level)
        self.formatter = None
        self._closed = False
        # Add the handler to the global _handlerList (for cleanup on shutdown)
        _addHandlerRef(self)
        self.createLock()

    def get_name(self):
        # Accessor for the 'name' property.
        return self._name

    def set_name(self, name):
        # Setter for the 'name' property; keeps the module-level _handlers
        # registry (used by getHandlerByName) in sync with the new name.
        with _lock:
            if self._name in _handlers:
                del _handlers[self._name]
            self._name = name
            if name:
                _handlers[name] = self

    name = property(get_name, set_name)

    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        self.lock = threading.RLock()
        _register_at_fork_reinit_lock(self)

    def _at_fork_reinit(self):
        # Reinitialize the I/O lock in the child process after a fork.
        self.lock._at_fork_reinit()

    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()

    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()

    def setLevel(self, level):
        """
        Set the logging level of this handler. level must be an int or a str.
        """
        self.level = _checkLevel(level)

    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)

    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError('emit must be implemented '
                                  'by Handler subclasses')

    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock.

        Returns an instance of the log record that was emitted
        if it passed all filters, otherwise a false value is returned.
        """
        rv = self.filter(record)
        # A filter may return a replacement LogRecord; use it if so.
        if isinstance(rv, LogRecord):
            record = rv
        if rv:
            with self.lock:
                self.emit(record)
        return rv

    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt

    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass

    def close(self):
        """
        Tidy up any resources used by the handler.

        This version removes the handler from an internal map of handlers,
        _handlers, which is used for handler lookup by name. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        with _lock:
            self._closed = True
            if self._name and self._name in _handlers:
                del _handlers[self._name]

    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions and sys.stderr:  # see issue 13807
            exc = sys.exception()
            try:
                sys.stderr.write('--- Logging error ---\n')
                traceback.print_exception(exc, limit=None, file=sys.stderr)
                sys.stderr.write('Call stack:\n')
                # Walk the stack frame up until we're out of logging,
                # so as to print the calling context.
                frame = exc.__traceback__.tb_frame
                while (frame and os.path.dirname(frame.f_code.co_filename) ==
                       __path__[0]):
                    frame = frame.f_back
                if frame:
                    traceback.print_stack(frame, file=sys.stderr)
                else:
                    # couldn't find the right stack frame, for some reason
                    sys.stderr.write('Logged from file %s, line %s\n' % (
                                     record.filename, record.lineno))
                # Issue 18671: output logging message and arguments
                try:
                    sys.stderr.write('Message: %r\n'
                                     'Arguments: %s\n' % (record.msg,
                                                          record.args))
                except RecursionError:  # See issue 36272
                    raise
                except Exception:
                    sys.stderr.write('Unable to print the message and arguments'
                                     ' - possible formatting error.\nUse the'
                                     ' traceback above to help find the error.\n'
                                    )
            except OSError: #pragma: no cover
                pass    # see issue 5971
            finally:
                # Break the reference cycle exc -> traceback -> this frame.
                del exc

    def __repr__(self):
        level = getLevelName(self.level)
        return '<%s (%s)>' % (self.__class__.__name__, level)


class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """

    terminator = '\n'

    def __init__(self, stream=None):
        """
        Initialize the handler.

        If stream is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if stream is None:
            stream = sys.stderr
        self.stream = stream

    def flush(self):
        """
        Flushes the stream.
        """
        with self.lock:
            if self.stream and hasattr(self.stream, "flush"):
                self.stream.flush()

    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            # issue 35046: merged two stream.writes into one.
            stream.write(msg + self.terminator)
            self.flush()
        except RecursionError:  # See issue 36272
            raise
        except Exception:
            self.handleError(record)

    def setStream(self, stream):
        """
        Sets the StreamHandler's stream to the specified value,
        if it is different.

        Returns the old stream, if the stream was changed, or None
        if it wasn't.
        """
        if stream is self.stream:
            result = None
        else:
            result = self.stream
            with self.lock:
                self.flush()
                self.stream = stream
        return result

    def __repr__(self):
        level = getLevelName(self.level)
        name = getattr(self.stream, 'name', '')
        # bpo-36015: name can be an int
        name = str(name)
        if name:
            name += ' '
        return '<%s %s(%s)>' % (self.__class__.__name__, name, level)

    __class_getitem__ = classmethod(GenericAlias)


class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.
        """
        # Issue #27493: add support for Path objects to be passed in
        filename = os.fspath(filename)
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        if "b" not in mode:
            # Text mode: resolve the effective text encoding (may emit
            # EncodingWarning under -X warn_default_encoding).
            self.encoding = io.text_encoding(encoding)
        self.errors = errors
        self.delay = delay
        # bpo-26789: FileHandler keeps a reference to the builtin open()
        # function to be able to open or reopen the file during Python
        # finalization.
        self._builtin_open = open
        if delay:
            #We don't open the stream, but we still need to call the
            #Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
        else:
            StreamHandler.__init__(self, self._open())

    def close(self):
        """
        Closes the stream.
        """
        with self.lock:
            try:
                if self.stream:
                    try:
                        self.flush()
                    finally:
                        stream = self.stream
                        self.stream = None
                        if hasattr(stream, "close"):
                            stream.close()
            finally:
                # Issue #19523: call unconditionally to
                # prevent a handler leak when delay is set
                # Also see Issue #42378: we also rely on
                # self._closed being set to True there
                StreamHandler.close(self)

    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        open_func = self._builtin_open
        return open_func(self.baseFilename, self.mode,
                         encoding=self.encoding, errors=self.errors)

    def emit(self, record):
        """
        Emit a record.

        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.

        If stream is not open, current mode is 'w' and `_closed=True`, record
        will not be emitted (see Issue #42378).
        """
        if self.stream is None:
            if self.mode != 'w' or not self._closed:
                self.stream = self._open()
        if self.stream:
            StreamHandler.emit(self, record)

    def __repr__(self):
        level = getLevelName(self.level)
        return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level)


class _StderrHandler(StreamHandler):
    """
    This class is like a StreamHandler using sys.stderr, but always uses
    whatever sys.stderr is currently set to rather than the value of
    sys.stderr at handler construction time.
    """
    def __init__(self, level=NOTSET):
        """
        Initialize the handler.
        """
        Handler.__init__(self, level)

    @property
    def stream(self):
        # Always resolve sys.stderr at use time, not construction time.
        return sys.stderr


_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort

#---------------------------------------------------------------------------
#   Manager classes and functions
#---------------------------------------------------------------------------

class PlaceHolder(object):
    """
    PlaceHolder instances are used in the Manager logger hierarchy to take
    the place of nodes for which no loggers have been defined. This class is
    intended for internal use only and not as part of the public API.
    """
    def __init__(self, alogger):
        """
        Initialize with the specified logger being a child of this placeholder.
        """
        self.loggerMap = { alogger : None }

    def append(self, alogger):
        """
        Add the specified logger as a child of this placeholder.
        """
        if alogger not in self.loggerMap:
            self.loggerMap[alogger] = None

#
#   Determine which class to use when instantiating loggers.
#

def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__()
    """
    if klass != Logger:
        if not issubclass(klass, Logger):
            raise TypeError("logger not derived from logging.Logger: "
                            + klass.__name__)
    global _loggerClass
    _loggerClass = klass

def getLoggerClass():
    """
    Return the class to be used when instantiating a logger.
    """
    return _loggerClass

class Manager(object):
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        self.disable = 0
        self.emittedNoHandlerWarning = False
        self.loggerDict = {}
        self.loggerClass = None
        self.logRecordFactory = None

    @property
    def disable(self):
        return self._disable

    @disable.setter
    def disable(self, value):
        # Normalize str/int level names to an int (raises on bad input).
        self._disable = _checkLevel(value)

    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        if not isinstance(name, str):
            raise TypeError('A logger name must be a string')
        with _lock:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        return rv

    def setLoggerClass(self, klass):
        """
        Set the class to be used when instantiating a logger with this Manager.
        """
        if klass != Logger:
            if not issubclass(klass, Logger):
                raise TypeError("logger not derived from logging.Logger: "
                                + klass.__name__)
        self.loggerClass = klass

    def setLogRecordFactory(self, factory):
        """
        Set the factory to be used when instantiating a log record with this
        Manager.
        """
        self.logRecordFactory = factory

    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = name.rfind(".")
        rv = None
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            rv = self.root
        alogger.parent = rv

    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(nm)
            if c.parent.name[:namelen] != name:
                alogger.parent = c.parent
                c.parent = alogger

    def _clear_cache(self):
        """
        Clear the cache for all loggers in loggerDict
        Called when level changes are made
        """

        with _lock:
            for logger in self.loggerDict.values():
                if isinstance(logger, Logger):
                    logger._cache.clear()
            self.root._cache.clear()

#---------------------------------------------------------------------------
#   Logger classes and functions
#---------------------------------------------------------------------------

class Logger(Filterer):
    """
    Instances of the Logger class represent a single logging channel. A
    "logging channel" indicates an area of an application. Exactly how an
    "area" is defined is up to the application developer. Since an
    application can have any number of areas, logging channels are identified
    by a unique string. Application areas can be nested (e.g. an area
    of "input processing" might include sub-areas "read CSV files", "read
    XLS files" and "read Gnumeric files"). To cater for this natural nesting,
    channel names are organized into a namespace hierarchy where levels are
    separated by periods, much like the Java or Python package namespace. So
    in the instance given above, channel names might be "input" for the upper
    level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
    There is no arbitrary limit to the depth of nesting.
    """
    def __init__(self, name, level=NOTSET):
        """
        Initialize the logger with a name and an optional level.
        """
        Filterer.__init__(self)
        self.name = name
        self.level = _checkLevel(level)
        self.parent = None
        self.propagate = True
        self.handlers = []
        self.disabled = False
        # Per-level cache of isEnabledFor() results; invalidated by
        # Manager._clear_cache() when levels change.
        self._cache = {}

    def setLevel(self, level):
        """
        Set the logging level of this logger.  level must be an int or a str.
        """
        self.level = _checkLevel(level)
        self.manager._clear_cache()

    def debug(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'DEBUG'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.debug("Houston, we have a %s", "thorny problem", exc_info=True)
        """
        if self.isEnabledFor(DEBUG):
            self._log(DEBUG, msg, args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'INFO'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.info("Houston, we have a %s", "notable problem", exc_info=True)
        """
        if self.isEnabledFor(INFO):
            self._log(INFO, msg, args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'WARNING'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True)
        """
        if self.isEnabledFor(WARNING):
            self._log(WARNING, msg, args, **kwargs)

    def warn(self, msg, *args, **kwargs):
        # Deprecated alias for warning(); kept for backward compatibility.
        warnings.warn("The 'warn' method is deprecated, "
                      "use 'warning' instead", DeprecationWarning, 2)
        self.warning(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'ERROR'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.error("Houston, we have a %s", "major problem", exc_info=True)
        """
        if self.isEnabledFor(ERROR):
            self._log(ERROR, msg, args, **kwargs)

    def exception(self, msg, *args, exc_info=True, **kwargs):
        """
        Convenience method for logging an ERROR with exception information.
        """
        self.error(msg, *args, exc_info=exc_info, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'CRITICAL'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.critical("Houston, we have a %s", "major disaster", exc_info=True)
        """
        if self.isEnabledFor(CRITICAL):
            self._log(CRITICAL, msg, args, **kwargs)

    def fatal(self, msg, *args, **kwargs):
        """
        Don't use this method, use critical() instead.
        """
        self.critical(msg, *args, **kwargs)

    def log(self, level, msg, *args, **kwargs):
        """
        Log 'msg % args' with the integer severity 'level'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.log(level, "We have a %s", "mysterious problem", exc_info=True)
        """
        if not isinstance(level, int):
            if raiseExceptions:
                raise TypeError("level must be an integer")
            else:
                return
        if self.isEnabledFor(level):
            self._log(level, msg, args, **kwargs)

    def findCaller(self, stack_info=False, stacklevel=1):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = currentframe()
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
        if f is None:
            return "(unknown file)", 0, "(unknown function)", None
        while stacklevel > 0:
            next_f = f.f_back
            if next_f is None:
                ## We've got options here.
                ## If we want to use the last (deepest) frame:
                break
                ## If we want to mimic the warnings module:
                #return ("sys", 1, "(unknown function)", None)
                ## If we want to be pedantic:
                #raise ValueError("call stack is not deep enough")
            f = next_f
            if not _is_internal_frame(f):
                stacklevel -= 1
        co = f.f_code
        sinfo = None
        if stack_info:
            with io.StringIO() as sio:
                sio.write("Stack (most recent call last):\n")
                traceback.print_stack(f, file=sio)
                sinfo = sio.getvalue()
                if sinfo[-1] == '\n':
                    sinfo = sinfo[:-1]
        return co.co_filename, f.f_lineno, co.co_name, sinfo

    def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
                   func=None, extra=None, sinfo=None):
        """
        A factory method which can be overridden in subclasses to create
        specialized LogRecords.
        """
        rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
                               sinfo)
        if extra is not None:
            for key in extra:
                if (key in ["message", "asctime"]) or (key in rv.__dict__):
                    raise KeyError("Attempt to overwrite %r in LogRecord" % key)
                rv.__dict__[key] = extra[key]
        return rv

    def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False,
             stacklevel=1):
        """
        Low-level logging routine which creates a LogRecord and then calls
        all the handlers of this logger to handle the record.
        """
        sinfo = None
        if _srcfile:
            #IronPython doesn't track Python frames, so findCaller raises an
            #exception on some versions of IronPython. We trap it here so that
            #IronPython can use logging.
            try:
                fn, lno, func, sinfo = self.findCaller(stack_info, stacklevel)
            except ValueError: # pragma: no cover
                fn, lno, func = "(unknown file)", 0, "(unknown function)"
        else: # pragma: no cover
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
        if exc_info:
            if isinstance(exc_info, BaseException):
                exc_info = (type(exc_info), exc_info, exc_info.__traceback__)
            elif not isinstance(exc_info, tuple):
                exc_info = sys.exc_info()
        record = self.makeRecord(self.name, level, fn, lno, msg, args,
                                 exc_info, func, extra, sinfo)
        self.handle(record)

    def handle(self, record):
        """
        Call the handlers for the specified record.

        This method is used for unpickled records received from a socket, as
        well as those created locally. Logger-level filtering is applied.
        """
        if self.disabled:
            return
        maybe_record = self.filter(record)
        if not maybe_record:
            return
        # A filter may return a replacement LogRecord; use it if so.
        if isinstance(maybe_record, LogRecord):
            record = maybe_record
        self.callHandlers(record)

    def addHandler(self, hdlr):
        """
        Add the specified handler to this logger.
        """
        with _lock:
            if hdlr not in self.handlers:
                self.handlers.append(hdlr)

    def removeHandler(self, hdlr):
        """
        Remove the specified handler from this logger.
        """
        with _lock:
            if hdlr in self.handlers:
                self.handlers.remove(hdlr)

    def hasHandlers(self):
        """
        See if this logger has any handlers configured.

        Loop through all handlers for this logger and its parents in the
        logger hierarchy. Return True if a handler was found, else False.
        Stop searching up the hierarchy whenever a logger with the "propagate"
        attribute set to zero is found - that will be the last logger which
        is checked for the existence of handlers.
        """
        c = self
        rv = False
        while c:
            if c.handlers:
                rv = True
                break
            if not c.propagate:
                break
            else:
                c = c.parent
        return rv

    def callHandlers(self, record):
        """
        Pass a record to all relevant handlers.

        Loop through all handlers for this logger and its parents in the
        logger hierarchy. If no handler was found, output a one-off error
        message to sys.stderr. Stop searching up the hierarchy whenever a
        logger with the "propagate" attribute set to zero is found - that
        will be the last logger whose handlers are called.
        """
        c = self
        found = 0
        while c:
            for hdlr in c.handlers:
                found = found + 1
                if record.levelno >= hdlr.level:
                    hdlr.handle(record)
            if not c.propagate:
                c = None    #break out
            else:
                c = c.parent
        if found == 0:
            if lastResort:
                if record.levelno >= lastResort.level:
                    lastResort.handle(record)
            elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
                sys.stderr.write("No handlers could be found for logger"
                                 " \"%s\"\n" % self.name)
                self.manager.emittedNoHandlerWarning = True

    def getEffectiveLevel(self):
        """
        Get the effective level for this logger.

        Loop through this logger and its parents in the logger hierarchy,
        looking for a non-zero logging level. Return the first one found.
        """
        logger = self
        while logger:
            if logger.level:
                return logger.level
            logger = logger.parent
        return NOTSET

    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        if self.disabled:
            return False

        try:
            return self._cache[level]
        except KeyError:
            with _lock:
                if self.manager.disable >= level:
                    is_enabled = self._cache[level] = False
                else:
                    is_enabled = self._cache[level] = (
                        level >= self.getEffectiveLevel()
                    )
            return is_enabled

    def getChild(self, suffix):
        """
        Get a logger which is a descendant to this one.

        This is a convenience method, such that

        logging.getLogger('abc').getChild('def.ghi')

        is the same as

        logging.getLogger('abc.def.ghi')

        It's useful, for example, when the parent logger is named using
        __name__ rather than a literal string.
        """
        if self.root is not self:
            suffix = '.'.join((self.name, suffix))
        return self.manager.getLogger(suffix)

    def getChildren(self):
        """
        Return the set of loggers which are immediate children of this logger.
        """
        def _hierlevel(logger):
            if logger is logger.manager.root:
                return 0
            return 1 + logger.name.count('.')

        d = self.manager.loggerDict
        with _lock:
            # exclude PlaceHolders - the last check is to ensure that lower-level
            # descendants aren't returned - if there are placeholders, a logger's
            # parent field might point to a grandparent or ancestor thereof.
            return set(item for item in d.values()
                       if isinstance(item, Logger) and item.parent is self and
                       _hierlevel(item) == 1 + _hierlevel(item.parent))

    def __repr__(self):
        level = getLevelName(self.getEffectiveLevel())
        return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level)

    def __reduce__(self):
        # Loggers are pickled by name; only registered loggers round-trip.
        if getLogger(self.name) is not self:
            import pickle
            raise pickle.PicklingError('logger cannot be pickled')
        return getLogger, (self.name,)


class RootLogger(Logger):
    """
    A root logger is not that different to any other logger, except that
    it must have a logging level and there is only one instance of it in
    the hierarchy.
    """
    def __init__(self, level):
        """
        Initialize the logger with the name "root".
        """
        Logger.__init__(self, "root", level)

    def __reduce__(self):
        # The root logger unpickles as getLogger() with no arguments.
        return getLogger, ()

_loggerClass = Logger
class LoggerAdapter(object):
    """
    An adapter for loggers which makes it easier to specify contextual
    information in logging output.
    """

    def __init__(self, logger, extra=None, merge_extra=False):
        """
        Initialize the adapter with a logger and a dict-like object which
        provides contextual information. This constructor signature allows
        easy stacking of LoggerAdapters, if so desired.

        You can effectively pass keyword arguments as shown in the
        following example:

        adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))

        By default, LoggerAdapter objects will drop the "extra" argument
        passed on the individual log calls to use its own instead.

        Initializing it with merge_extra=True will instead merge both
        maps when logging, the individual call extra taking precedence
        over the LoggerAdapter instance extra

        .. versionchanged:: 3.13
           The *merge_extra* argument was added.
        """
        self.logger = logger
        self.extra = extra
        self.merge_extra = merge_extra

    def process(self, msg, kwargs):
        """
        Process the logging message and keyword arguments passed in to
        a logging call to insert contextual information. You can either
        manipulate the message itself, the keyword args or both. Return
        the message and kwargs modified (or not) to suit your needs.

        Normally, you'll only need to override this one method in a
        LoggerAdapter subclass for your specific needs.
        """
        if self.merge_extra and "extra" in kwargs:
            # Per-call extra wins over the adapter's own extra on key clashes.
            kwargs["extra"] = {**self.extra, **kwargs["extra"]}
        else:
            kwargs["extra"] = self.extra
        return msg, kwargs

    #
    # Boilerplate convenience methods
    #
    def debug(self, msg, *args, **kwargs):
        """
        Delegate a debug call to the underlying logger.
        """
        self.log(DEBUG, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """
        Delegate an info call to the underlying logger.
        """
        self.log(INFO, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """
        Delegate a warning call to the underlying logger.
        """
        self.log(WARNING, msg, *args, **kwargs)

    def warn(self, msg, *args, **kwargs):
        # Deprecated alias for warning(); kept for backward compatibility.
        warnings.warn("The 'warn' method is deprecated, "
                      "use 'warning' instead", DeprecationWarning, 2)
        self.warning(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """
        Delegate an error call to the underlying logger.
        """
        self.log(ERROR, msg, *args, **kwargs)

    def exception(self, msg, *args, exc_info=True, **kwargs):
        """
        Delegate an exception call to the underlying logger.
        """
        self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """
        Delegate a critical call to the underlying logger.
        """
        self.log(CRITICAL, msg, *args, **kwargs)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegate a log call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        if self.isEnabledFor(level):
            msg, kwargs = self.process(msg, kwargs)
            self.logger.log(level, msg, *args, **kwargs)

    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        return self.logger.isEnabledFor(level)

    def setLevel(self, level):
        """
        Set the specified level on the underlying logger.
        """
        self.logger.setLevel(level)

    def getEffectiveLevel(self):
        """
        Get the effective level for the underlying logger.
        """
        return self.logger.getEffectiveLevel()

    def hasHandlers(self):
        """
        See if the underlying logger has any handlers.
        """
        return self.logger.hasHandlers()

    def _log(self, level, msg, args, **kwargs):
        """
        Low-level log implementation, proxied to allow nested logger adapters.
        """
        return self.logger._log(level, msg, args, **kwargs)

    @property
    def manager(self):
        return self.logger.manager

    @manager.setter
    def manager(self, value):
        self.logger.manager = value

    @property
    def name(self):
        return self.logger.name

    def __repr__(self):
        logger = self.logger
        level = getLevelName(logger.getEffectiveLevel())
        return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level)

    __class_getitem__ = classmethod(GenericAlias)

root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)

#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
2025 style If a format string is specified, use this to specify the 2026 type of format string (possible values '%', '{', '$', for 2027 %-formatting, :meth:`str.format` and :class:`string.Template` 2028 - defaults to '%'). 2029 level Set the root logger level to the specified level. 2030 stream Use the specified stream to initialize the StreamHandler. Note 2031 that this argument is incompatible with 'filename' - if both 2032 are present, 'stream' is ignored. 2033 handlers If specified, this should be an iterable of already created 2034 handlers, which will be added to the root logger. Any handler 2035 in the list which does not have a formatter assigned will be 2036 assigned the formatter created in this function. 2037 force If this keyword is specified as true, any existing handlers 2038 attached to the root logger are removed and closed, before 2039 carrying out the configuration as specified by the other 2040 arguments. 2041 encoding If specified together with a filename, this encoding is passed to 2042 the created FileHandler, causing it to be used when the file is 2043 opened. 2044 errors If specified together with a filename, this value is passed to the 2045 created FileHandler, causing it to be used when the file is 2046 opened in text mode. If not specified, the default value is 2047 `backslashreplace`. 2048 2049 Note that you could specify a stream created using open(filename, mode) 2050 rather than passing the filename and mode in. However, it should be 2051 remembered that StreamHandler does not close its stream (since it may be 2052 using sys.stdout or sys.stderr), whereas FileHandler closes its stream 2053 when the handler is closed. 2054 2055 .. versionchanged:: 3.2 2056 Added the ``style`` parameter. 2057 2058 .. versionchanged:: 3.3 2059 Added the ``handlers`` parameter. A ``ValueError`` is now thrown for 2060 incompatible arguments (e.g. 
``handlers`` specified together with 2061 ``filename``/``filemode``, or ``filename``/``filemode`` specified 2062 together with ``stream``, or ``handlers`` specified together with 2063 ``stream``. 2064 2065 .. versionchanged:: 3.8 2066 Added the ``force`` parameter. 2067 2068 .. versionchanged:: 3.9 2069 Added the ``encoding`` and ``errors`` parameters. 2070 """ 2071 # Add thread safety in case someone mistakenly calls 2072 # basicConfig() from multiple threads 2073 with _lock: 2074 force = kwargs.pop('force', False) 2075 encoding = kwargs.pop('encoding', None) 2076 errors = kwargs.pop('errors', 'backslashreplace') 2077 if force: 2078 for h in root.handlers[:]: 2079 root.removeHandler(h) 2080 h.close() 2081 if len(root.handlers) == 0: 2082 handlers = kwargs.pop("handlers", None) 2083 if handlers is None: 2084 if "stream" in kwargs and "filename" in kwargs: 2085 raise ValueError("'stream' and 'filename' should not be " 2086 "specified together") 2087 else: 2088 if "stream" in kwargs or "filename" in kwargs: 2089 raise ValueError("'stream' or 'filename' should not be " 2090 "specified together with 'handlers'") 2091 if handlers is None: 2092 filename = kwargs.pop("filename", None) 2093 mode = kwargs.pop("filemode", 'a') 2094 if filename: 2095 if 'b' in mode: 2096 errors = None 2097 else: 2098 encoding = io.text_encoding(encoding) 2099 h = FileHandler(filename, mode, 2100 encoding=encoding, errors=errors) 2101 else: 2102 stream = kwargs.pop("stream", None) 2103 h = StreamHandler(stream) 2104 handlers = [h] 2105 dfs = kwargs.pop("datefmt", None) 2106 style = kwargs.pop("style", '%') 2107 if style not in _STYLES: 2108 raise ValueError('Style must be one of: %s' % ','.join( 2109 _STYLES.keys())) 2110 fs = kwargs.pop("format", _STYLES[style][1]) 2111 fmt = Formatter(fs, dfs, style) 2112 for h in handlers: 2113 if h.formatter is None: 2114 h.setFormatter(fmt) 2115 root.addHandler(h) 2116 level = kwargs.pop("level", None) 2117 if level is not None: 2118 root.setLevel(level) 
2119 if kwargs: 2120 keys = ', '.join(kwargs.keys()) 2121 raise ValueError('Unrecognised argument(s): %s' % keys) 2122 2123#--------------------------------------------------------------------------- 2124# Utility functions at module level. 2125# Basically delegate everything to the root logger. 2126#--------------------------------------------------------------------------- 2127 2128def getLogger(name=None): 2129 """ 2130 Return a logger with the specified name, creating it if necessary. 2131 2132 If no name is specified, return the root logger. 2133 """ 2134 if not name or isinstance(name, str) and name == root.name: 2135 return root 2136 return Logger.manager.getLogger(name) 2137 2138def critical(msg, *args, **kwargs): 2139 """ 2140 Log a message with severity 'CRITICAL' on the root logger. If the logger 2141 has no handlers, call basicConfig() to add a console handler with a 2142 pre-defined format. 2143 """ 2144 if len(root.handlers) == 0: 2145 basicConfig() 2146 root.critical(msg, *args, **kwargs) 2147 2148def fatal(msg, *args, **kwargs): 2149 """ 2150 Don't use this function, use critical() instead. 2151 """ 2152 critical(msg, *args, **kwargs) 2153 2154def error(msg, *args, **kwargs): 2155 """ 2156 Log a message with severity 'ERROR' on the root logger. If the logger has 2157 no handlers, call basicConfig() to add a console handler with a pre-defined 2158 format. 2159 """ 2160 if len(root.handlers) == 0: 2161 basicConfig() 2162 root.error(msg, *args, **kwargs) 2163 2164def exception(msg, *args, exc_info=True, **kwargs): 2165 """ 2166 Log a message with severity 'ERROR' on the root logger, with exception 2167 information. If the logger has no handlers, basicConfig() is called to add 2168 a console handler with a pre-defined format. 2169 """ 2170 error(msg, *args, exc_info=exc_info, **kwargs) 2171 2172def warning(msg, *args, **kwargs): 2173 """ 2174 Log a message with severity 'WARNING' on the root logger. 
If the logger has 2175 no handlers, call basicConfig() to add a console handler with a pre-defined 2176 format. 2177 """ 2178 if len(root.handlers) == 0: 2179 basicConfig() 2180 root.warning(msg, *args, **kwargs) 2181 2182def warn(msg, *args, **kwargs): 2183 warnings.warn("The 'warn' function is deprecated, " 2184 "use 'warning' instead", DeprecationWarning, 2) 2185 warning(msg, *args, **kwargs) 2186 2187def info(msg, *args, **kwargs): 2188 """ 2189 Log a message with severity 'INFO' on the root logger. If the logger has 2190 no handlers, call basicConfig() to add a console handler with a pre-defined 2191 format. 2192 """ 2193 if len(root.handlers) == 0: 2194 basicConfig() 2195 root.info(msg, *args, **kwargs) 2196 2197def debug(msg, *args, **kwargs): 2198 """ 2199 Log a message with severity 'DEBUG' on the root logger. If the logger has 2200 no handlers, call basicConfig() to add a console handler with a pre-defined 2201 format. 2202 """ 2203 if len(root.handlers) == 0: 2204 basicConfig() 2205 root.debug(msg, *args, **kwargs) 2206 2207def log(level, msg, *args, **kwargs): 2208 """ 2209 Log 'msg % args' with the integer severity 'level' on the root logger. If 2210 the logger has no handlers, call basicConfig() to add a console handler 2211 with a pre-defined format. 2212 """ 2213 if len(root.handlers) == 0: 2214 basicConfig() 2215 root.log(level, msg, *args, **kwargs) 2216 2217def disable(level=CRITICAL): 2218 """ 2219 Disable all logging calls of severity 'level' and below. 2220 """ 2221 root.manager.disable = level 2222 root.manager._clear_cache() 2223 2224def shutdown(handlerList=_handlerList): 2225 """ 2226 Perform any cleanup actions in the logging system (e.g. flushing 2227 buffers). 2228 2229 Should be called at application exit. 
2230 """ 2231 for wr in reversed(handlerList[:]): 2232 #errors might occur, for example, if files are locked 2233 #we just ignore them if raiseExceptions is not set 2234 try: 2235 h = wr() 2236 if h: 2237 try: 2238 h.acquire() 2239 # MemoryHandlers might not want to be flushed on close, 2240 # but circular imports prevent us scoping this to just 2241 # those handlers. hence the default to True. 2242 if getattr(h, 'flushOnClose', True): 2243 h.flush() 2244 h.close() 2245 except (OSError, ValueError): 2246 # Ignore errors which might be caused 2247 # because handlers have been closed but 2248 # references to them are still around at 2249 # application exit. 2250 pass 2251 finally: 2252 h.release() 2253 except: # ignore everything, as we're shutting down 2254 if raiseExceptions: 2255 raise 2256 #else, swallow 2257 2258#Let's try and shutdown automatically on application exit... 2259import atexit 2260atexit.register(shutdown) 2261 2262# Null handler 2263 2264class NullHandler(Handler): 2265 """ 2266 This handler does nothing. It's intended to be used to avoid the 2267 "No handlers could be found for logger XXX" one-off warning. This is 2268 important for library code, which may contain code to log events. If a user 2269 of the library does not configure logging, the one-off warning might be 2270 produced; to avoid this, the library developer simply needs to instantiate 2271 a NullHandler and add it to the top-level logger of the library module or 2272 package. 2273 """ 2274 def handle(self, record): 2275 """Stub.""" 2276 2277 def emit(self, record): 2278 """Stub.""" 2279 2280 def createLock(self): 2281 self.lock = None 2282 2283 def _at_fork_reinit(self): 2284 pass 2285 2286# Warnings integration 2287 2288_warnings_showwarning = None 2289 2290def _showwarning(message, category, filename, lineno, file=None, line=None): 2291 """ 2292 Implementation of showwarnings which redirects to logging, which will first 2293 check to see if the file parameter is None. 
If a file is specified, it will 2294 delegate to the original warnings implementation of showwarning. Otherwise, 2295 it will call warnings.formatwarning and will log the resulting string to a 2296 warnings logger named "py.warnings" with level logging.WARNING. 2297 """ 2298 if file is not None: 2299 if _warnings_showwarning is not None: 2300 _warnings_showwarning(message, category, filename, lineno, file, line) 2301 else: 2302 s = warnings.formatwarning(message, category, filename, lineno, line) 2303 logger = getLogger("py.warnings") 2304 if not logger.handlers: 2305 logger.addHandler(NullHandler()) 2306 # bpo-46557: Log str(s) as msg instead of logger.warning("%s", s) 2307 # since some log aggregation tools group logs by the msg arg 2308 logger.warning(str(s)) 2309 2310def captureWarnings(capture): 2311 """ 2312 If capture is true, redirect all warnings to the logging package. 2313 If capture is False, ensure that warnings are not redirected to logging 2314 but to their original destinations. 2315 """ 2316 global _warnings_showwarning 2317 if capture: 2318 if _warnings_showwarning is None: 2319 _warnings_showwarning = warnings.showwarning 2320 warnings.showwarning = _showwarning 2321 else: 2322 if _warnings_showwarning is not None: 2323 warnings.showwarning = _warnings_showwarning 2324 _warnings_showwarning = None
class BufferingFormatter(object):
    """
    A formatter for rendering a whole batch of records at once.

    The output is an (overridable) header, one formatted line per record,
    and an (overridable) footer.
    """
    def __init__(self, linefmt=None):
        """
        Optionally supply the per-record formatter; any falsy value selects
        the module-level default formatter.
        """
        self.linefmt = linefmt if linefmt else _defaultFormatter

    def formatHeader(self, records):
        """
        Return the header string for the specified records.

        The base implementation produces no header.
        """
        return ""

    def formatFooter(self, records):
        """
        Return the footer string for the specified records.

        The base implementation produces no footer.
        """
        return ""

    def format(self, records):
        """
        Format the specified records and return the result as a string.

        An empty batch yields an empty string, with no header or footer.
        """
        if not records:
            return ""
        pieces = [self.formatHeader(records)]
        pieces.extend(self.linefmt.format(record) for record in records)
        pieces.append(self.formatFooter(records))
        return "".join(pieces)
A formatter suitable for formatting a number of records.
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record.
        """
        if linefmt:
            self.linefmt = linefmt
        else:
            # Any falsy value falls back to the module-level default
            # formatter.
            self.linefmt = _defaultFormatter
Optionally specify a formatter which will be used to format each individual record.
    def formatHeader(self, records):
        """
        Return the header string for the specified records.

        The base implementation emits no header; subclasses may override.
        """
        return ""
Return the header string for the specified records.
    def format(self, records):
        """
        Format the specified records and return the result as a string.
        """
        rv = ""
        if len(records) > 0:
            # header + one formatted line per record + footer; an empty
            # batch yields an empty string with neither header nor footer.
            rv = rv + self.formatHeader(records)
            for record in records:
                rv = rv + self.linefmt.format(record)
            rv = rv + self.formatFooter(records)
        return rv
Format the specified records and return the result as a string.
class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.

        If 'delay' is true, the file is not opened until the first call to
        emit().
        """
        # Issue #27493: add support for Path objects to be passed in
        filename = os.fspath(filename)
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        if "b" not in mode:
            # Text mode: resolve the effective text encoding now so _open()
            # always reopens with the same one.
            self.encoding = io.text_encoding(encoding)
        self.errors = errors
        self.delay = delay
        # bpo-26789: FileHandler keeps a reference to the builtin open()
        # function to be able to open or reopen the file during Python
        # finalization.
        self._builtin_open = open
        if delay:
            #We don't open the stream, but we still need to call the
            #Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
        else:
            StreamHandler.__init__(self, self._open())

    def close(self):
        """
        Closes the stream.
        """
        with self.lock:
            try:
                if self.stream:
                    try:
                        self.flush()
                    finally:
                        # Null out self.stream before closing so a failure in
                        # close() cannot leave a dangling reference behind.
                        stream = self.stream
                        self.stream = None
                        if hasattr(stream, "close"):
                            stream.close()
            finally:
                # Issue #19523: call unconditionally to
                # prevent a handler leak when delay is set
                # Also see Issue #42378: we also rely on
                # self._closed being set to True there
                StreamHandler.close(self)

    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        # Use the saved builtin open(); the global may be gone during
        # interpreter finalization (bpo-26789).
        open_func = self._builtin_open
        return open_func(self.baseFilename, self.mode,
                         encoding=self.encoding, errors=self.errors)

    def emit(self, record):
        """
        Emit a record.

        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.

        If stream is not open, current mode is 'w' and `_closed=True`, record
        will not be emitted (see Issue #42378).
        """
        if self.stream is None:
            # Reopening a 'w'-mode file after close() would truncate it and
            # lose previously-written records, so that case is skipped.
            if self.mode != 'w' or not self._closed:
                self.stream = self._open()
        if self.stream:
            StreamHandler.emit(self, record)

    def __repr__(self):
        level = getLevelName(self.level)
        return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level)
A handler class which writes formatted logging records to disk files.
    def __init__(self, filename, mode='a', encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.

        If 'delay' is true, the file is not opened until the first call to
        emit().
        """
        # Issue #27493: add support for Path objects to be passed in
        filename = os.fspath(filename)
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        if "b" not in mode:
            # Text mode: resolve the effective text encoding up front.
            self.encoding = io.text_encoding(encoding)
        self.errors = errors
        self.delay = delay
        # bpo-26789: FileHandler keeps a reference to the builtin open()
        # function to be able to open or reopen the file during Python
        # finalization.
        self._builtin_open = open
        if delay:
            #We don't open the stream, but we still need to call the
            #Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
        else:
            StreamHandler.__init__(self, self._open())
Open the specified file and use it as the stream for logging.
    def close(self):
        """
        Closes the stream.
        """
        with self.lock:
            try:
                if self.stream:
                    try:
                        self.flush()
                    finally:
                        # Null out self.stream before closing so a failure in
                        # close() cannot leave a dangling reference behind.
                        stream = self.stream
                        self.stream = None
                        if hasattr(stream, "close"):
                            stream.close()
            finally:
                # Issue #19523: call unconditionally to
                # prevent a handler leak when delay is set
                # Also see Issue #42378: we also rely on
                # self._closed being set to True there
                StreamHandler.close(self)
Closes the stream.
    def emit(self, record):
        """
        Emit a record.

        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.

        If stream is not open, current mode is 'w' and `_closed=True`, record
        will not be emitted (see Issue #42378).
        """
        if self.stream is None:
            # Reopening a 'w'-mode file after close() would truncate it and
            # lose previously-written records, so that case is skipped.
            if self.mode != 'w' or not self._closed:
                self.stream = self._open()
        if self.stream:
            StreamHandler.emit(self, record)
Emit a record.
If the stream was not opened because 'delay' was specified in the constructor, open it before calling the superclass's emit.
If the stream is not open, the current mode is 'w' and `_closed` is True, the record will not be emitted (see Issue #42378).
class Filter(object):
    """
    Allow through only events logged within a given logger subtree.

    Loggers and handlers may attach Filter instances to discard unwanted
    records. This base class passes a record when its logger name equals
    the configured name or lies below it in the dotted hierarchy: a filter
    built with "A.B" accepts "A.B", "A.B.C", "A.B.C.D", "A.B.D" and so on,
    but rejects "A.BB" or "B.A.B". An empty name accepts every record.
    """
    def __init__(self, name=''):
        """
        Remember the logger name whose subtree is allowed through.

        An empty (default) name means every event is allowed.
        """
        self.name = name
        self.nlen = len(name)

    def filter(self, record):
        """
        Decide whether the specified record is to be logged.

        Returns True to accept the record, False to drop it. Subclasses may
        modify the record in place if appropriate.
        """
        # No configured name: accept everything.
        if self.nlen == 0:
            return True
        candidate = record.name
        # Exact match with the configured logger name.
        if self.name == candidate:
            return True
        # Otherwise accept only proper descendants: the configured name
        # followed immediately by a dot separator.
        if not candidate.startswith(self.name):
            return False
        return candidate[self.nlen] == "."
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter records as desired. The base filter class only allows events which are below a certain point in the logger hierarchy. For example, a filter initialized with "A.B" will allow events logged by loggers "A.B", "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If initialized with the empty string, all events are passed.
    def __init__(self, name=''):
        """
        Initialize a filter.

        Initialize with the name of the logger which, together with its
        children, will have its events allowed through the filter. If no
        name is specified, allow every event.
        """
        self.name = name
        # Cache the name length; filter() uses it for prefix comparisons.
        self.nlen = len(name)
Initialize a filter.
Initialize with the name of the logger which, together with its children, will have its events allowed through the filter. If no name is specified, allow every event.
    def filter(self, record):
        """
        Determine if the specified record is to be logged.

        Returns True if the record should be logged, or False otherwise.
        If deemed appropriate, the record may be modified in-place.
        """
        if self.nlen == 0:
            # Empty filter name: everything passes.
            return True
        elif self.name == record.name:
            return True
        elif record.name.find(self.name, 0, self.nlen) != 0:
            # The bounded find() is a prefix test: it can only return 0 when
            # record.name starts with self.name.
            return False
        # Prefix matched and the names differ, so record.name is longer;
        # require a '.' right after the prefix (descendant logger).
        return (record.name[self.nlen] == ".")
Determine if the specified record is to be logged.
Returns True if the record should be logged, or False otherwise. If deemed appropriate, the record may be modified in-place.
class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    style-dependent default value, "%(message)s", "{message}", or
    "${message}", is used.

    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:

    %(name)s            Name of the logger (logging channel)
    %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                        WARNING, ERROR, CRITICAL)
    %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                        "WARNING", "ERROR", "CRITICAL")
    %(pathname)s        Full pathname of the source file where the logging
                        call was issued (if available)
    %(filename)s        Filename portion of pathname
    %(module)s          Module (name portion of filename)
    %(lineno)d          Source line number where the logging call was issued
                        (if available)
    %(funcName)s        Function name
    %(created)f         Time when the LogRecord was created (time.time_ns() / 1e9
                        return value)
    %(asctime)s         Textual time when the LogRecord was created
    %(msecs)d           Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                        relative to the time the logging module was loaded
                        (typically at application startup time)
    %(thread)d          Thread ID (if available)
    %(threadName)s      Thread name (if available)
    %(taskName)s        Task name (if available)
    %(process)d         Process ID (if available)
    %(message)s         The result of record.getMessage(), computed just as
                        the record is emitted
    """

    # Converts record.created (epoch seconds) to a time tuple for
    # formatTime(); overridable per instance or class-wide (e.g. time.gmtime).
    converter = time.localtime

    def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *,
                 defaults=None):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument. If datefmt is omitted, you get an
        ISO8601-like (or RFC 3339-like) format.

        Use a style parameter of '%', '{' or '$' to specify that you want to
        use one of %-formatting, :meth:`str.format` (``{}``) formatting or
        :class:`string.Template` formatting in your format string.

        .. versionchanged:: 3.2
           Added the ``style`` parameter.
        """
        if style not in _STYLES:
            raise ValueError('Style must be one of: %s' % ','.join(
                             _STYLES.keys()))
        # _STYLES maps the style character to (style class, default format).
        self._style = _STYLES[style][0](fmt, defaults=defaults)
        if validate:
            self._style.validate()

        self._fmt = self._style._fmt
        self.datefmt = datefmt

    default_time_format = '%Y-%m-%d %H:%M:%S'
    default_msec_format = '%s,%03d'

    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used.
        The resulting string is returned. This function uses a user-configurable
        function to convert the creation time to a tuple. By default,
        time.localtime() is used; to change this for a particular formatter
        instance, set the 'converter' attribute to a function with the same
        signature as time.localtime() or time.gmtime(). To change it for all
        formatters, for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            s = time.strftime(self.default_time_format, ct)
            if self.default_msec_format:
                # Milliseconds are appended only for the default format;
                # a falsy default_msec_format suppresses them.
                s = self.default_msec_format % (s, record.msecs)
        return s

    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.

        This default implementation just uses
        traceback.print_exception()
        """
        sio = io.StringIO()
        tb = ei[2]
        # See issues #9427, #1553375. Commented out for now.
        #if getattr(self, 'fullstack', False):
        #    traceback.print_stack(tb.tb_frame.f_back, file=sio)
        traceback.print_exception(ei[0], ei[1], tb, limit=None, file=sio)
        s = sio.getvalue()
        sio.close()
        # Strip the single trailing newline emitted by print_exception().
        if s[-1:] == "\n":
            s = s[:-1]
        return s

    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._style.usesTime()

    def formatMessage(self, record):
        """
        Apply the configured style's format to the record and return the
        resulting string.
        """
        return self._style.format(record)

    def formatStack(self, stack_info):
        """
        This method is provided as an extension point for specialized
        formatting of stack information.

        The input data is a string as returned from a call to
        :func:`traceback.print_stack`, but with the last trailing newline
        removed.

        The base implementation just returns the value passed in.
        """
        return stack_info

    def format(self, record):
        """
        Format the specified record as text.

        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self.formatMessage(record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + record.exc_text
        if record.stack_info:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + self.formatStack(record.stack_info)
        return s
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are responsible for converting a LogRecord to (usually) a string which can be interpreted by either a human or an external system. The base Formatter allows a formatting string to be specified. If none is supplied, the style-dependent default value, "%(message)s", "{message}", or "${message}", is used.
The Formatter can be initialized with a format string which makes use of knowledge of the LogRecord attributes - e.g. the default value mentioned above makes use of the fact that the user's message and arguments are pre- formatted into a LogRecord's message attribute. Currently, the useful attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel) %(levelno)s Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL) %(levelname)s Text logging level for the message ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") %(pathname)s Full pathname of the source file where the logging call was issued (if available) %(filename)s Filename portion of pathname %(module)s Module (name portion of filename) %(lineno)d Source line number where the logging call was issued (if available) %(funcName)s Function name %(created)f Time when the LogRecord was created (time.time_ns() / 1e9 return value) %(asctime)s Textual time when the LogRecord was created %(msecs)d Millisecond portion of the creation time %(relativeCreated)d Time in milliseconds when the LogRecord was created, relative to the time the logging module was loaded (typically at application startup time) %(thread)d Thread ID (if available) %(threadName)s Thread name (if available) %(taskName)s Task name (if available) %(process)d Process ID (if available) %(message)s The result of record.getMessage(), computed just as the record is emitted
    def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *,
                 defaults=None):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument. If datefmt is omitted, you get an
        ISO8601-like (or RFC 3339-like) format.

        Use a style parameter of '%', '{' or '$' to specify that you want to
        use one of %-formatting, :meth:`str.format` (``{}``) formatting or
        :class:`string.Template` formatting in your format string.

        .. versionchanged:: 3.2
           Added the ``style`` parameter.
        """
        if style not in _STYLES:
            raise ValueError('Style must be one of: %s' % ','.join(
                             _STYLES.keys()))
        # _STYLES maps the style character to (style class, default format).
        self._style = _STYLES[style][0](fmt, defaults=defaults)
        if validate:
            self._style.validate()

        self._fmt = self._style._fmt
        self.datefmt = datefmt
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a default as described above. Allow for specialized date formatting with the optional datefmt argument. If datefmt is omitted, you get an ISO8601-like (or RFC 3339-like) format.
Use a style parameter of '%', '{' or '$' to specify that you want to use one of %-formatting, `str.format()` (`{}`) formatting or `string.Template` formatting in your format string.

Changed in version 3.2: Added the *style* parameter.
localtime([seconds]) -> (tm_year,tm_mon,tm_mday,tm_hour,tm_min, tm_sec,tm_wday,tm_yday,tm_isdst)
Convert seconds since the Epoch to a time tuple expressing local time. When 'seconds' is not passed in, convert the current time instead.
def formatTime(self, record, datefmt=None):
    """
    Return the creation time of the specified LogRecord as formatted text.

    This method should be called from format() by a formatter which wants
    to make use of a formatted time, and can be overridden in formatters
    to provide for any specific requirement.  The record's creation time
    is first converted to a time tuple via the user-configurable
    'converter' attribute (any function with the same signature as
    time.localtime() or time.gmtime(); set it on an instance for one
    formatter, or on the Formatter class to affect all formatters, e.g.
    to show all logging times in GMT).  If datefmt (a string) is given,
    it is used with time.strftime(); otherwise an ISO8601-like (or
    RFC 3339-like) default format is used, with milliseconds appended
    via default_msec_format when that attribute is set.
    """
    broken_down = self.converter(record.created)
    if datefmt:
        return time.strftime(datefmt, broken_down)
    formatted = time.strftime(self.default_time_format, broken_down)
    if self.default_msec_format:
        formatted = self.default_msec_format % (formatted, record.msecs)
    return formatted
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which wants to make use of a formatted time. This method can be overridden in formatters to provide for any specific requirement, but the basic behaviour is as follows: if datefmt (a string) is specified, it is used with time.strftime() to format the creation time of the record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used. The resulting string is returned. This function uses a user-configurable function to convert the creation time to a tuple. By default, time.localtime() is used; to change this for a particular formatter instance, set the 'converter' attribute to a function with the same signature as time.localtime() or time.gmtime(). To change it for all formatters, for example if you want all logging times to be shown in GMT, set the 'converter' attribute in the Formatter class.
def formatException(self, ei):
    """
    Format and return the specified exception information as a string.

    This default implementation just uses traceback.print_exception();
    the single trailing newline the traceback module emits is stripped
    from the result.
    """
    # See issues #9427, #1553375 for the history around also printing the
    # full call stack here; that behaviour remains disabled.
    with io.StringIO() as buffer:
        traceback.print_exception(ei[0], ei[1], ei[2], limit=None, file=buffer)
        rendered = buffer.getvalue()
    # removesuffix drops at most one trailing newline, matching the
    # original single-character strip.
    return rendered.removesuffix("\n")
Format and return the specified exception information as a string.
This default implementation just uses traceback.print_exception()
def usesTime(self):
    """
    Return whether the configured format string references the record's
    creation time (i.e. whether format() must compute 'asctime').
    """
    return self._style.usesTime()
Check if the format uses the creation time of the record.
def formatStack(self, stack_info):
    """
    Format the given stack information for output.

    Provided as an extension point for specialized formatting of stack
    information.  The input data is a string as returned from a call to
    traceback.print_stack(), but with the last trailing newline removed.
    This base implementation returns the value passed in unchanged.
    """
    return stack_info
This method is provided as an extension point for specialized formatting of stack information.
The input data is a string as returned from a call to traceback.print_stack(), but with the last trailing newline removed.

The base implementation just returns the value passed in.
def format(self, record):
    """
    Format the specified record as text.

    The record's attribute dictionary is used as the operand to a string
    formatting operation which yields the returned string.  Before
    formatting, record.message is computed via LogRecord.getMessage() and,
    if the format string uses the time (as determined by a call to
    usesTime()), record.asctime is computed via formatTime().  Any
    exception information is formatted with formatException() — and cached
    on the record, since it is constant — and appended, followed by any
    stack information formatted with formatStack().
    """
    def append_block(text, block):
        # Join `block` onto `text`, ensuring exactly one separating newline.
        if text[-1:] != "\n":
            text = text + "\n"
        return text + block

    record.message = record.getMessage()
    if self.usesTime():
        record.asctime = self.formatTime(record, self.datefmt)
    result = self.formatMessage(record)
    if record.exc_info:
        # Cache the traceback text on the record to avoid re-rendering it
        # for every handler (it cannot change for a given record).
        if not record.exc_text:
            record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            result = append_block(result, record.exc_text)
    if record.stack_info:
        result = append_block(result, self.formatStack(record.stack_info))
    return result
Format the specified record as text.
The record's attribute dictionary is used as the operand to a string formatting operation which yields the returned string. Before formatting the dictionary, a couple of preparatory steps are carried out. The message attribute of the record is computed using LogRecord.getMessage(). If the formatting string uses the time (as determined by a call to usesTime()), formatTime() is called to format the event time. If there is exception information, it is formatted using formatException() and appended to the message.
class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.
        """
        Filterer.__init__(self)
        self._name = None
        # _checkLevel accepts an int or a level-name str and normalizes it.
        self.level = _checkLevel(level)
        self.formatter = None
        self._closed = False
        # Add the handler to the global _handlerList (for cleanup on shutdown)
        _addHandlerRef(self)
        self.createLock()

    def get_name(self):
        # Accessor backing the 'name' property below.
        return self._name

    def set_name(self, name):
        # Keep the module-level _handlers name->handler map consistent:
        # drop the old registration (if any) before adding the new one.
        with _lock:
            if self._name in _handlers:
                del _handlers[self._name]
            self._name = name
            if name:
                _handlers[name] = self

    name = property(get_name, set_name)

    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        self.lock = threading.RLock()
        # Register so the lock is re-initialized in the child after fork().
        _register_at_fork_reinit_lock(self)

    def _at_fork_reinit(self):
        # Called in the forked child; the inherited lock state is invalid.
        self.lock._at_fork_reinit()

    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        # NOTE(review): the truth test presumably allows subclasses to set
        # self.lock to None to disable locking — confirm against subclasses.
        if self.lock:
            self.lock.acquire()

    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()

    def setLevel(self, level):
        """
        Set the logging level of this handler.  level must be an int or a str.
        """
        self.level = _checkLevel(level)

    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)

    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError('emit must be implemented '
                                  'by Handler subclasses')

    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock.

        Returns an instance of the log record that was emitted
        if it passed all filters, otherwise a false value is returned.
        """
        rv = self.filter(record)
        # A filter may return a replacement LogRecord; emit that instead.
        if isinstance(rv, LogRecord):
            record = rv
        if rv:
            with self.lock:
                self.emit(record)
        return rv

    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt

    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass

    def close(self):
        """
        Tidy up any resources used by the handler.

        This version removes the handler from an internal map of handlers,
        _handlers, which is used for handler lookup by name. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        with _lock:
            self._closed = True
            if self._name and self._name in _handlers:
                del _handlers[self._name]

    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions and sys.stderr:  # see issue 13807
            exc = sys.exception()
            try:
                sys.stderr.write('--- Logging error ---\n')
                traceback.print_exception(exc, limit=None, file=sys.stderr)
                sys.stderr.write('Call stack:\n')
                # Walk the stack frame up until we're out of logging,
                # so as to print the calling context.
                frame = exc.__traceback__.tb_frame
                while (frame and os.path.dirname(frame.f_code.co_filename) ==
                       __path__[0]):
                    frame = frame.f_back
                if frame:
                    traceback.print_stack(frame, file=sys.stderr)
                else:
                    # couldn't find the right stack frame, for some reason
                    sys.stderr.write('Logged from file %s, line %s\n' % (
                                     record.filename, record.lineno))
                # Issue 18671: output logging message and arguments
                try:
                    sys.stderr.write('Message: %r\n'
                                     'Arguments: %s\n' % (record.msg,
                                                          record.args))
                except RecursionError:  # See issue 36272
                    raise
                except Exception:
                    # The record's own formatting failed; emit a generic
                    # notice rather than raising from the error handler.
                    sys.stderr.write('Unable to print the message and arguments'
                                     ' - possible formatting error.\nUse the'
                                     ' traceback above to help find the error.\n'
                                    )
            except OSError:  # pragma: no cover
                pass    # see issue 5971
            finally:
                # Break the local reference to the exception so its
                # traceback does not keep frames alive.
                del exc

    def __repr__(self):
        # e.g. '<StreamHandler (DEBUG)>'
        level = getLevelName(self.level)
        return '<%s (%s)>' % (self.__class__.__name__, level)
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler interface. Handlers can optionally use Formatter instances to format records as desired. By default, no formatter is specified; in this case, the 'raw' message as determined by record.message is logged.
931 def __init__(self, level=NOTSET): 932 """ 933 Initializes the instance - basically setting the formatter to None 934 and the filter list to empty. 935 """ 936 Filterer.__init__(self) 937 self._name = None 938 self.level = _checkLevel(level) 939 self.formatter = None 940 self._closed = False 941 # Add the handler to the global _handlerList (for cleanup on shutdown) 942 _addHandlerRef(self) 943 self.createLock()
Initializes the instance - basically setting the formatter to None and the filter list to empty.
958 def createLock(self): 959 """ 960 Acquire a thread lock for serializing access to the underlying I/O. 961 """ 962 self.lock = threading.RLock() 963 _register_at_fork_reinit_lock(self)
Acquire a thread lock for serializing access to the underlying I/O.
968 def acquire(self): 969 """ 970 Acquire the I/O thread lock. 971 """ 972 if self.lock: 973 self.lock.acquire()
Acquire the I/O thread lock.
975 def release(self): 976 """ 977 Release the I/O thread lock. 978 """ 979 if self.lock: 980 self.lock.release()
Release the I/O thread lock.
982 def setLevel(self, level): 983 """ 984 Set the logging level of this handler. level must be an int or a str. 985 """ 986 self.level = _checkLevel(level)
Set the logging level of this handler. level must be an int or a str.
988 def format(self, record): 989 """ 990 Format the specified record. 991 992 If a formatter is set, use it. Otherwise, use the default formatter 993 for the module. 994 """ 995 if self.formatter: 996 fmt = self.formatter 997 else: 998 fmt = _defaultFormatter 999 return fmt.format(record)
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter for the module.
1001 def emit(self, record): 1002 """ 1003 Do whatever it takes to actually log the specified logging record. 1004 1005 This version is intended to be implemented by subclasses and so 1006 raises a NotImplementedError. 1007 """ 1008 raise NotImplementedError('emit must be implemented ' 1009 'by Handler subclasses')
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so raises a NotImplementedError.
1011 def handle(self, record): 1012 """ 1013 Conditionally emit the specified logging record. 1014 1015 Emission depends on filters which may have been added to the handler. 1016 Wrap the actual emission of the record with acquisition/release of 1017 the I/O thread lock. 1018 1019 Returns an instance of the log record that was emitted 1020 if it passed all filters, otherwise a false value is returned. 1021 """ 1022 rv = self.filter(record) 1023 if isinstance(rv, LogRecord): 1024 record = rv 1025 if rv: 1026 with self.lock: 1027 self.emit(record) 1028 return rv
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler. Wrap the actual emission of the record with acquisition/release of the I/O thread lock.
Returns an instance of the log record that was emitted if it passed all filters, otherwise a false value is returned.
1030 def setFormatter(self, fmt): 1031 """ 1032 Set the formatter for this handler. 1033 """ 1034 self.formatter = fmt
Set the formatter for this handler.
1036 def flush(self): 1037 """ 1038 Ensure all logging output has been flushed. 1039 1040 This version does nothing and is intended to be implemented by 1041 subclasses. 1042 """ 1043 pass
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by subclasses.
1045 def close(self): 1046 """ 1047 Tidy up any resources used by the handler. 1048 1049 This version removes the handler from an internal map of handlers, 1050 _handlers, which is used for handler lookup by name. Subclasses 1051 should ensure that this gets called from overridden close() 1052 methods. 1053 """ 1054 #get the module data lock, as we're updating a shared structure. 1055 with _lock: 1056 self._closed = True 1057 if self._name and self._name in _handlers: 1058 del _handlers[self._name]
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers, _handlers, which is used for handler lookup by name. Subclasses should ensure that this gets called from overridden close() methods.
1060 def handleError(self, record): 1061 """ 1062 Handle errors which occur during an emit() call. 1063 1064 This method should be called from handlers when an exception is 1065 encountered during an emit() call. If raiseExceptions is false, 1066 exceptions get silently ignored. This is what is mostly wanted 1067 for a logging system - most users will not care about errors in 1068 the logging system, they are more interested in application errors. 1069 You could, however, replace this with a custom handler if you wish. 1070 The record which was being processed is passed in to this method. 1071 """ 1072 if raiseExceptions and sys.stderr: # see issue 13807 1073 exc = sys.exception() 1074 try: 1075 sys.stderr.write('--- Logging error ---\n') 1076 traceback.print_exception(exc, limit=None, file=sys.stderr) 1077 sys.stderr.write('Call stack:\n') 1078 # Walk the stack frame up until we're out of logging, 1079 # so as to print the calling context. 1080 frame = exc.__traceback__.tb_frame 1081 while (frame and os.path.dirname(frame.f_code.co_filename) == 1082 __path__[0]): 1083 frame = frame.f_back 1084 if frame: 1085 traceback.print_stack(frame, file=sys.stderr) 1086 else: 1087 # couldn't find the right stack frame, for some reason 1088 sys.stderr.write('Logged from file %s, line %s\n' % ( 1089 record.filename, record.lineno)) 1090 # Issue 18671: output logging message and arguments 1091 try: 1092 sys.stderr.write('Message: %r\n' 1093 'Arguments: %s\n' % (record.msg, 1094 record.args)) 1095 except RecursionError: # See issue 36272 1096 raise 1097 except Exception: 1098 sys.stderr.write('Unable to print the message and arguments' 1099 ' - possible formatting error.\nUse the' 1100 ' traceback above to help find the error.\n' 1101 ) 1102 except OSError: #pragma: no cover 1103 pass # see issue 5971 1104 finally: 1105 del exc
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is encountered during an emit() call. If raiseExceptions is false, exceptions get silently ignored. This is what is mostly wanted for a logging system - most users will not care about errors in the logging system, they are more interested in application errors. You could, however, replace this with a custom handler if you wish. The record which was being processed is passed in to this method.
Inherited Members
class LogRecord(object):
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None, sinfo=None, **kwargs):
        """
        Initialize a logging record with interesting information.
        """
        # Capture the creation time first, in integer nanoseconds.
        ct = time.time_ns()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        #  logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warning('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        # Issue #21172: a request was made to relax the isinstance check
        # to hasattr(args[0], '__getitem__'). However, the docs on string
        # formatting still seem to suggest a mapping object is required.
        # Thus, while not removing the isinstance check, it does now look
        # for collections.abc.Mapping rather than, as before, dict.
        if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping)
            and args[0]):
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            # pathname may not be a string (or may be malformed); fall back
            # to using it verbatim rather than failing record creation.
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None      # used to cache the traceback text
        self.stack_info = sinfo
        self.lineno = lineno
        self.funcName = func
        self.created = ct / 1e9  # ns to float seconds
        # Get the number of whole milliseconds (0-999) in the fractional part of seconds.
        # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns --> 999 ms
        # Convert to float by adding 0.0 for historical reasons. See gh-89047
        self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0
        if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000:
            # ns -> sec conversion can round up, e.g:
            # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec
            # Keep msecs consistent with the rounded-up seconds value.
            self.msecs = 0.0

        self.relativeCreated = (ct - _startTime) / 1e6
        if logThreads:
            self.thread = threading.get_ident()
            self.threadName = threading.current_thread().name
        else: # pragma: no cover
            self.thread = None
            self.threadName = None
        if not logMultiprocessing: # pragma: no cover
            self.processName = None
        else:
            self.processName = 'MainProcess'
            # Look up multiprocessing lazily, without importing it here.
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except Exception: #pragma: no cover
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None

        self.taskName = None
        if logAsyncioTasks:
            # As above: only consult asyncio if it is already imported.
            asyncio = sys.modules.get('asyncio')
            if asyncio:
                try:
                    # current_task() raises outside a running event loop.
                    self.taskName = asyncio.current_task().get_name()
                except Exception:
                    pass

    def __repr__(self):
        """Return a debugging representation of the record."""
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)

    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        msg = str(self.msg)
        if self.args:
            msg = msg % self.args
        return msg
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They contain all the information pertinent to the event being logged. The main information passed in is in msg and args, which are combined using str(msg) % args to create the message field of the record. The record also includes information such as when the record was created, the source line where the logging call was made, and any exception information to be logged.
299 def __init__(self, name, level, pathname, lineno, 300 msg, args, exc_info, func=None, sinfo=None, **kwargs): 301 """ 302 Initialize a logging record with interesting information. 303 """ 304 ct = time.time_ns() 305 self.name = name 306 self.msg = msg 307 # 308 # The following statement allows passing of a dictionary as a sole 309 # argument, so that you can do something like 310 # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) 311 # Suggested by Stefan Behnel. 312 # Note that without the test for args[0], we get a problem because 313 # during formatting, we test to see if the arg is present using 314 # 'if self.args:'. If the event being logged is e.g. 'Value is %d' 315 # and if the passed arg fails 'if self.args:' then no formatting 316 # is done. For example, logger.warning('Value is %d', 0) would log 317 # 'Value is %d' instead of 'Value is 0'. 318 # For the use case of passing a dictionary, this should not be a 319 # problem. 320 # Issue #21172: a request was made to relax the isinstance check 321 # to hasattr(args[0], '__getitem__'). However, the docs on string 322 # formatting still seem to suggest a mapping object is required. 323 # Thus, while not removing the isinstance check, it does now look 324 # for collections.abc.Mapping rather than, as before, dict. 
325 if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) 326 and args[0]): 327 args = args[0] 328 self.args = args 329 self.levelname = getLevelName(level) 330 self.levelno = level 331 self.pathname = pathname 332 try: 333 self.filename = os.path.basename(pathname) 334 self.module = os.path.splitext(self.filename)[0] 335 except (TypeError, ValueError, AttributeError): 336 self.filename = pathname 337 self.module = "Unknown module" 338 self.exc_info = exc_info 339 self.exc_text = None # used to cache the traceback text 340 self.stack_info = sinfo 341 self.lineno = lineno 342 self.funcName = func 343 self.created = ct / 1e9 # ns to float seconds 344 # Get the number of whole milliseconds (0-999) in the fractional part of seconds. 345 # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns--> 999 ms 346 # Convert to float by adding 0.0 for historical reasons. See gh-89047 347 self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0 348 if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000: 349 # ns -> sec conversion can round up, e.g: 350 # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec 351 self.msecs = 0.0 352 353 self.relativeCreated = (ct - _startTime) / 1e6 354 if logThreads: 355 self.thread = threading.get_ident() 356 self.threadName = threading.current_thread().name 357 else: # pragma: no cover 358 self.thread = None 359 self.threadName = None 360 if not logMultiprocessing: # pragma: no cover 361 self.processName = None 362 else: 363 self.processName = 'MainProcess' 364 mp = sys.modules.get('multiprocessing') 365 if mp is not None: 366 # Errors may occur if multiprocessing has not finished loading 367 # yet - e.g. if a custom import hook causes third-party code 368 # to run when multiprocessing calls import. 
See issue 8200 369 # for an example 370 try: 371 self.processName = mp.current_process().name 372 except Exception: #pragma: no cover 373 pass 374 if logProcesses and hasattr(os, 'getpid'): 375 self.process = os.getpid() 376 else: 377 self.process = None 378 379 self.taskName = None 380 if logAsyncioTasks: 381 asyncio = sys.modules.get('asyncio') 382 if asyncio: 383 try: 384 self.taskName = asyncio.current_task().get_name() 385 except Exception: 386 pass
Initialize a logging record with interesting information.
392 def getMessage(self): 393 """ 394 Return the message for this LogRecord. 395 396 Return the message for this LogRecord after merging any user-supplied 397 arguments with the message. 398 """ 399 msg = str(self.msg) 400 if self.args: 401 msg = msg % self.args 402 return msg
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied arguments with the message.
1463class Logger(Filterer): 1464 """ 1465 Instances of the Logger class represent a single logging channel. A 1466 "logging channel" indicates an area of an application. Exactly how an 1467 "area" is defined is up to the application developer. Since an 1468 application can have any number of areas, logging channels are identified 1469 by a unique string. Application areas can be nested (e.g. an area 1470 of "input processing" might include sub-areas "read CSV files", "read 1471 XLS files" and "read Gnumeric files"). To cater for this natural nesting, 1472 channel names are organized into a namespace hierarchy where levels are 1473 separated by periods, much like the Java or Python package namespace. So 1474 in the instance given above, channel names might be "input" for the upper 1475 level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. 1476 There is no arbitrary limit to the depth of nesting. 1477 """ 1478 def __init__(self, name, level=NOTSET): 1479 """ 1480 Initialize the logger with a name and an optional level. 1481 """ 1482 Filterer.__init__(self) 1483 self.name = name 1484 self.level = _checkLevel(level) 1485 self.parent = None 1486 self.propagate = True 1487 self.handlers = [] 1488 self.disabled = False 1489 self._cache = {} 1490 1491 def setLevel(self, level): 1492 """ 1493 Set the logging level of this logger. level must be an int or a str. 1494 """ 1495 self.level = _checkLevel(level) 1496 self.manager._clear_cache() 1497 1498 def debug(self, msg, *args, **kwargs): 1499 """ 1500 Log 'msg % args' with severity 'DEBUG'. 1501 1502 To pass exception information, use the keyword argument exc_info with 1503 a true value, e.g. 1504 1505 logger.debug("Houston, we have a %s", "thorny problem", exc_info=True) 1506 """ 1507 if self.isEnabledFor(DEBUG): 1508 self._log(DEBUG, msg, args, **kwargs) 1509 1510 def info(self, msg, *args, **kwargs): 1511 """ 1512 Log 'msg % args' with severity 'INFO'. 
# NOTE(review): this span begins mid-way through Logger.info(); the lines
# below are the tail of its docstring and body, continued from before this
# excerpt.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.

logger.info("Houston, we have a %s", "notable problem", exc_info=True)
"""
if self.isEnabledFor(INFO):
    self._log(INFO, msg, args, **kwargs)

def warning(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'WARNING'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True)
    """
    if self.isEnabledFor(WARNING):
        self._log(WARNING, msg, args, **kwargs)

def warn(self, msg, *args, **kwargs):
    # Deprecated alias; stacklevel 2 attributes the warning to the caller
    # of warn(), not to this shim itself.
    warnings.warn("The 'warn' method is deprecated, "
                  "use 'warning' instead", DeprecationWarning, 2)
    self.warning(msg, *args, **kwargs)

def error(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'ERROR'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.error("Houston, we have a %s", "major problem", exc_info=True)
    """
    if self.isEnabledFor(ERROR):
        self._log(ERROR, msg, args, **kwargs)

def exception(self, msg, *args, exc_info=True, **kwargs):
    """
    Convenience method for logging an ERROR with exception information.
    """
    # exc_info defaults to True so the currently-handled exception is
    # captured without the caller having to pass it explicitly.
    self.error(msg, *args, exc_info=exc_info, **kwargs)

def critical(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'CRITICAL'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.critical("Houston, we have a %s", "major disaster", exc_info=True)
    """
    if self.isEnabledFor(CRITICAL):
        self._log(CRITICAL, msg, args, **kwargs)

def fatal(self, msg, *args, **kwargs):
    """
    Don't use this method, use critical() instead.
    """
    # Backward-compatibility alias; simply delegates to critical().
    self.critical(msg, *args, **kwargs)

def log(self, level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.log(level, "We have a %s", "mysterious problem", exc_info=True)
    """
    if not isinstance(level, int):
        if raiseExceptions:
            raise TypeError("level must be an integer")
        else:
            return
    if self.isEnabledFor(level):
        self._log(level, msg, args, **kwargs)

def findCaller(self, stack_info=False, stacklevel=1):
    """
    Find the stack frame of the caller so that we can note the source
    file name, line number and function name.
    """
    f = currentframe()
    #On some versions of IronPython, currentframe() returns None if
    #IronPython isn't run with -X:Frames.
    if f is None:
        return "(unknown file)", 0, "(unknown function)", None
    while stacklevel > 0:
        next_f = f.f_back
        if next_f is None:
            ## We've got options here.
            ## If we want to use the last (deepest) frame:
            break
            ## If we want to mimic the warnings module:
            #return ("sys", 1, "(unknown function)", None)
            ## If we want to be pedantic:
            #raise ValueError("call stack is not deep enough")
        f = next_f
        # Frames belonging to the logging machinery itself (as judged by
        # _is_internal_frame, defined elsewhere in this module) do not
        # count toward stacklevel.
        if not _is_internal_frame(f):
            stacklevel -= 1
    co = f.f_code
    sinfo = None
    if stack_info:
        with io.StringIO() as sio:
            sio.write("Stack (most recent call last):\n")
            traceback.print_stack(f, file=sio)
            sinfo = sio.getvalue()
            if sinfo[-1] == '\n':
                sinfo = sinfo[:-1]
    return co.co_filename, f.f_lineno, co.co_name, sinfo

def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
               func=None, extra=None, sinfo=None):
    """
    A factory method which can be overridden in subclasses to create
    specialized LogRecords.
    """
    rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
                           sinfo)
    if extra is not None:
        # Refuse extras that would shadow attributes the formatter relies
        # on (or any attribute already present on the record).
        for key in extra:
            if (key in ["message", "asctime"]) or (key in rv.__dict__):
                raise KeyError("Attempt to overwrite %r in LogRecord" % key)
            rv.__dict__[key] = extra[key]
    return rv

def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False,
         stacklevel=1):
    """
    Low-level logging routine which creates a LogRecord and then calls
    all the handlers of this logger to handle the record.
    """
    sinfo = None
    if _srcfile:
        #IronPython doesn't track Python frames, so findCaller raises an
        #exception on some versions of IronPython. We trap it here so that
        #IronPython can use logging.
        try:
            fn, lno, func, sinfo = self.findCaller(stack_info, stacklevel)
        except ValueError: # pragma: no cover
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
    else: # pragma: no cover
        fn, lno, func = "(unknown file)", 0, "(unknown function)"
    if exc_info:
        # Normalize exc_info to the (type, value, traceback) triple: a bare
        # exception instance is expanded, and any other truthy non-tuple
        # means "use the currently-handled exception".
        if isinstance(exc_info, BaseException):
            exc_info = (type(exc_info), exc_info, exc_info.__traceback__)
        elif not isinstance(exc_info, tuple):
            exc_info = sys.exc_info()
    record = self.makeRecord(self.name, level, fn, lno, msg, args,
                             exc_info, func, extra, sinfo)
    self.handle(record)

def handle(self, record):
    """
    Call the handlers for the specified record.

    This method is used for unpickled records received from a socket, as
    well as those created locally. Logger-level filtering is applied.
    """
    if self.disabled:
        return
    maybe_record = self.filter(record)
    if not maybe_record:
        return
    # A filter may return a replacement LogRecord; use it if so.
    if isinstance(maybe_record, LogRecord):
        record = maybe_record
    self.callHandlers(record)

def addHandler(self, hdlr):
    """
    Add the specified handler to this logger.
    """
    with _lock:
        if not (hdlr in self.handlers):
            self.handlers.append(hdlr)

def removeHandler(self, hdlr):
    """
    Remove the specified handler from this logger.
    """
    with _lock:
        if hdlr in self.handlers:
            self.handlers.remove(hdlr)

def hasHandlers(self):
    """
    See if this logger has any handlers configured.

    Loop through all handlers for this logger and its parents in the
    logger hierarchy. Return True if a handler was found, else False.
    Stop searching up the hierarchy whenever a logger with the "propagate"
    attribute set to zero is found - that will be the last logger which
    is checked for the existence of handlers.
    """
    c = self
    rv = False
    while c:
        if c.handlers:
            rv = True
            break
        if not c.propagate:
            break
        else:
            c = c.parent
    return rv

def callHandlers(self, record):
    """
    Pass a record to all relevant handlers.

    Loop through all handlers for this logger and its parents in the
    logger hierarchy. If no handler was found, output a one-off error
    message to sys.stderr. Stop searching up the hierarchy whenever a
    logger with the "propagate" attribute set to zero is found - that
    will be the last logger whose handlers are called.
    """
    c = self
    found = 0
    while c:
        for hdlr in c.handlers:
            found = found + 1
            if record.levelno >= hdlr.level:
                hdlr.handle(record)
        if not c.propagate:
            c = None    #break out
        else:
            c = c.parent
    if (found == 0):
        # No handler anywhere in the chain: fall back to lastResort, or
        # emit a one-off warning on stderr.
        if lastResort:
            if record.levelno >= lastResort.level:
                lastResort.handle(record)
        elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
            sys.stderr.write("No handlers could be found for logger"
                             " \"%s\"\n" % self.name)
            self.manager.emittedNoHandlerWarning = True

def getEffectiveLevel(self):
    """
    Get the effective level for this logger.

    Loop through this logger and its parents in the logger hierarchy,
    looking for a non-zero logging level. Return the first one found.
    """
    logger = self
    while logger:
        if logger.level:
            return logger.level
        logger = logger.parent
    return NOTSET

def isEnabledFor(self, level):
    """
    Is this logger enabled for level 'level'?
    """
    if self.disabled:
        return False

    try:
        # Fast path: answer memoized in self._cache (cleared via the
        # manager when levels change - see setLevel).
        return self._cache[level]
    except KeyError:
        with _lock:
            if self.manager.disable >= level:
                is_enabled = self._cache[level] = False
            else:
                is_enabled = self._cache[level] = (
                    level >= self.getEffectiveLevel()
                )
        return is_enabled

def getChild(self, suffix):
    """
    Get a logger which is a descendant to this one.

    This is a convenience method, such that

    logging.getLogger('abc').getChild('def.ghi')

    is the same as

    logging.getLogger('abc.def.ghi')

    It's useful, for example, when the parent logger is named using
    __name__ rather than a literal string.
    """
    if self.root is not self:
        suffix = '.'.join((self.name, suffix))
    return self.manager.getLogger(suffix)

def getChildren(self):
    """Return the set of loggers that are immediate children of this one."""

    def _hierlevel(logger):
        if logger is logger.manager.root:
            return 0
        return 1 + logger.name.count('.')

    d = self.manager.loggerDict
    with _lock:
        # exclude PlaceHolders - the last check is to ensure that lower-level
        # descendants aren't returned - if there are placeholders, a logger's
        # parent field might point to a grandparent or ancestor thereof.
        return set(item for item in d.values()
                   if isinstance(item, Logger) and item.parent is self and
                   _hierlevel(item) == 1 + _hierlevel(item.parent))

def __repr__(self):
    level = getLevelName(self.getEffectiveLevel())
    return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level)

def __reduce__(self):
    # Loggers are process-wide singletons keyed by name; pickling works by
    # re-fetching the same name on unpickle, and is refused for loggers
    # that are not the registered instance for their name.
    if getLogger(self.name) is not self:
        import pickle
        raise pickle.PicklingError('logger cannot be pickled')
    return getLogger, (self.name,)
Instances of the Logger class represent a single logging channel. A "logging channel" indicates an area of an application. Exactly how an "area" is defined is up to the application developer. Since an application can have any number of areas, logging channels are identified by a unique string. Application areas can be nested (e.g. an area of "input processing" might include sub-areas "read CSV files", "read XLS files" and "read Gnumeric files"). To cater for this natural nesting, channel names are organized into a namespace hierarchy where levels are separated by periods, much like the Java or Python package namespace. So in the instance given above, channel names might be "input" for the upper level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. There is no arbitrary limit to the depth of nesting.
def __init__(self, name, level=NOTSET):
    """
    Initialize the logger with a name and an optional level.
    """
    Filterer.__init__(self)
    self.name = name
    self.level = _checkLevel(level)  # normalized/validated level value
    self.parent = None               # None here; attached to the hierarchy elsewhere
    self.propagate = True            # pass records up to ancestor handlers
    self.handlers = []
    self.disabled = False
    self._cache = {}                 # per-level isEnabledFor() memoization
Initialize the logger with a name and an optional level.
def setLevel(self, level):
    """
    Set the logging level of this logger.  level must be an int or a str.
    """
    self.level = _checkLevel(level)
    # Level changes invalidate the cached isEnabledFor() answers.
    self.manager._clear_cache()
Set the logging level of this logger. level must be an int or a str.
def debug(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'DEBUG'.

    Exception information can be attached by passing the keyword argument
    exc_info with a true value, e.g.

    logger.debug("Houston, we have a %s", "thorny problem", exc_info=True)
    """
    # Guard clause: skip all record-building work when DEBUG is disabled.
    if not self.isEnabledFor(DEBUG):
        return
    self._log(DEBUG, msg, args, **kwargs)
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=True)
def info(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'INFO'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.info("Houston, we have a %s", "notable problem", exc_info=True)
    """
    # Level check first, so disabled calls cost only an isEnabledFor() lookup.
    if self.isEnabledFor(INFO):
        self._log(INFO, msg, args, **kwargs)
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.info("Houston, we have a %s", "notable problem", exc_info=True)
def warning(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'WARNING'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True)
    """
    # Level check first, so disabled calls cost only an isEnabledFor() lookup.
    if self.isEnabledFor(WARNING):
        self._log(WARNING, msg, args, **kwargs)
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True)
def error(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'ERROR'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.error("Houston, we have a %s", "major problem", exc_info=True)
    """
    # Level check first, so disabled calls cost only an isEnabledFor() lookup.
    if self.isEnabledFor(ERROR):
        self._log(ERROR, msg, args, **kwargs)
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=True)
def exception(self, msg, *args, exc_info=True, **kwargs):
    """
    Convenience method for logging an ERROR with exception information.
    """
    # exc_info defaults to True so the currently-handled exception is
    # attached without the caller having to pass it explicitly.
    self.error(msg, *args, exc_info=exc_info, **kwargs)
Convenience method for logging an ERROR with exception information.
def critical(self, msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'CRITICAL'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.critical("Houston, we have a %s", "major disaster", exc_info=True)
    """
    # Level check first, so disabled calls cost only an isEnabledFor() lookup.
    if self.isEnabledFor(CRITICAL):
        self._log(CRITICAL, msg, args, **kwargs)
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=True)
def fatal(self, msg, *args, **kwargs):
    """
    Don't use this method, use critical() instead.
    """
    # Backward-compatibility alias; simply delegates to critical().
    self.critical(msg, *args, **kwargs)
Don't use this method, use critical() instead.
def log(self, level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.log(level, "We have a %s", "mysterious problem", exc_info=True)
    """
    if not isinstance(level, int):
        # Non-integer levels are either an error (raiseExceptions) or
        # silently dropped, mirroring the module-wide policy.
        if not raiseExceptions:
            return
        raise TypeError("level must be an integer")
    if self.isEnabledFor(level):
        self._log(level, msg, args, **kwargs)
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=True)
def findCaller(self, stack_info=False, stacklevel=1):
    """
    Find the stack frame of the caller so that we can note the source
    file name, line number and function name.
    """
    f = currentframe()
    #On some versions of IronPython, currentframe() returns None if
    #IronPython isn't run with -X:Frames.
    if f is None:
        return "(unknown file)", 0, "(unknown function)", None
    while stacklevel > 0:
        next_f = f.f_back
        if next_f is None:
            ## We've got options here.
            ## If we want to use the last (deepest) frame:
            break
            ## If we want to mimic the warnings module:
            #return ("sys", 1, "(unknown function)", None)
            ## If we want to be pedantic:
            #raise ValueError("call stack is not deep enough")
        f = next_f
        # Only frames outside the logging machinery (per _is_internal_frame,
        # defined elsewhere in this module) count toward stacklevel.
        if not _is_internal_frame(f):
            stacklevel -= 1
    co = f.f_code
    sinfo = None
    if stack_info:
        with io.StringIO() as sio:
            sio.write("Stack (most recent call last):\n")
            traceback.print_stack(f, file=sio)
            sinfo = sio.getvalue()
            # Strip the single trailing newline added by print_stack.
            if sinfo[-1] == '\n':
                sinfo = sinfo[:-1]
    return co.co_filename, f.f_lineno, co.co_name, sinfo
Find the stack frame of the caller so that we can note the source file name, line number and function name.
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
               func=None, extra=None, sinfo=None):
    """
    A factory method which can be overridden in subclasses to create
    specialized LogRecords.
    """
    rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
                           sinfo)
    if extra is not None:
        # Refuse extras that would shadow attributes the formatter relies on
        # ("message"/"asctime") or any attribute already on the record.
        for key in extra:
            if (key in ["message", "asctime"]) or (key in rv.__dict__):
                raise KeyError("Attempt to overwrite %r in LogRecord" % key)
            rv.__dict__[key] = extra[key]
    return rv
A factory method which can be overridden in subclasses to create specialized LogRecords.
def handle(self, record):
    """
    Call the handlers for the specified record.

    This method is used for unpickled records received from a socket, as
    well as those created locally. Logger-level filtering is applied.
    """
    if self.disabled:
        return
    outcome = self.filter(record)
    if outcome:
        # A filter may return a replacement LogRecord; any other truthy
        # result means "keep the original record".
        self.callHandlers(outcome if isinstance(outcome, LogRecord)
                          else record)
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as well as those created locally. Logger-level filtering is applied.
def addHandler(self, hdlr):
    """
    Add the specified handler to this logger.

    Adding the same handler twice is a no-op.
    """
    with _lock:
        if hdlr not in self.handlers:
            self.handlers.append(hdlr)
Add the specified handler to this logger.
def removeHandler(self, hdlr):
    """
    Remove the specified handler from this logger.

    Removing a handler that is not attached is a no-op.
    """
    with _lock:
        try:
            self.handlers.remove(hdlr)
        except ValueError:
            pass
Remove the specified handler from this logger.
def hasHandlers(self):
    """
    See if this logger has any handlers configured.

    Walk from this logger up through its ancestors and return True as
    soon as one with handlers is found.  The walk stops at the first
    logger whose "propagate" attribute is false - that is the last
    logger checked, mirroring the search done when a record is emitted.
    """
    node = self
    while node:
        if node.handlers:
            return True
        if not node.propagate:
            return False
        node = node.parent
    return False
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the logger hierarchy. Return True if a handler was found, else False. Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger which is checked for the existence of handlers.
def callHandlers(self, record):
    """
    Pass a record to all relevant handlers.

    Loop through all handlers for this logger and its parents in the
    logger hierarchy. If no handler was found, output a one-off error
    message to sys.stderr. Stop searching up the hierarchy whenever a
    logger with the "propagate" attribute set to zero is found - that
    will be the last logger whose handlers are called.
    """
    c = self
    found = 0
    while c:
        for hdlr in c.handlers:
            found = found + 1
            # Each handler applies its own level threshold independently.
            if record.levelno >= hdlr.level:
                hdlr.handle(record)
        if not c.propagate:
            c = None    #break out
        else:
            c = c.parent
    if (found == 0):
        # No handler anywhere in the chain: fall back to the module-level
        # lastResort handler, or print a one-off warning to stderr.
        if lastResort:
            if record.levelno >= lastResort.level:
                lastResort.handle(record)
        elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
            sys.stderr.write("No handlers could be found for logger"
                             " \"%s\"\n" % self.name)
            self.manager.emittedNoHandlerWarning = True
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the logger hierarchy. If no handler was found, output a one-off error message to sys.stderr. Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger whose handlers are called.
def getEffectiveLevel(self):
    """
    Get the effective level for this logger.

    Walk up the hierarchy and return the first explicitly-set (non-zero)
    level found; fall back to NOTSET when no ancestor sets one.
    """
    node = self
    while node is not None:
        if node.level:
            return node.level
        node = node.parent
    return NOTSET
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy, looking for a non-zero logging level. Return the first one found.
def isEnabledFor(self, level):
    """
    Is this logger enabled for level 'level'?
    """
    if self.disabled:
        return False

    try:
        # Fast path: memoized answer (cache is cleared via the manager
        # whenever levels change - see setLevel).
        return self._cache[level]
    except KeyError:
        # Compute and memoize under the module lock so the cache and the
        # manager's global 'disable' threshold are read consistently.
        with _lock:
            if self.manager.disable >= level:
                is_enabled = self._cache[level] = False
            else:
                is_enabled = self._cache[level] = (
                    level >= self.getEffectiveLevel()
                )
        return is_enabled
Is this logger enabled for level 'level'?
def getChild(self, suffix):
    """
    Get a logger which is a descendant to this one.

    This is a convenience method, such that

    logging.getLogger('abc').getChild('def.ghi')

    is the same as

    logging.getLogger('abc.def.ghi')

    It's useful, for example, when the parent logger is named using
    __name__ rather than a literal string.
    """
    # The root logger contributes no name prefix.
    name = suffix if self.root is self else f"{self.name}.{suffix}"
    return self.manager.getLogger(name)
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using __name__ rather than a literal string.
def getChildren(self):
    """Return the set of loggers that are immediate children of this one."""

    def _hierlevel(logger):
        # Depth in the dotted-name hierarchy; the root logger is level 0.
        if logger is logger.manager.root:
            return 0
        return 1 + logger.name.count('.')

    d = self.manager.loggerDict
    with _lock:
        # exclude PlaceHolders - the last check is to ensure that lower-level
        # descendants aren't returned - if there are placeholders, a logger's
        # parent field might point to a grandparent or ancestor thereof.
        return set(item for item in d.values()
                   if isinstance(item, Logger) and item.parent is self and
                   _hierlevel(item) == 1 + _hierlevel(item.parent))
Inherited Members
class LoggerAdapter(object):
    """
    An adapter for loggers which makes it easier to specify contextual
    information in logging output.
    """

    def __init__(self, logger, extra=None, merge_extra=False):
        """
        Initialize the adapter with a logger and a dict-like object which
        provides contextual information. This constructor signature allows
        easy stacking of LoggerAdapters, if so desired.

        You can effectively pass keyword arguments as shown in the
        following example:

        adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))

        By default, LoggerAdapter objects will drop the "extra" argument
        passed on the individual log calls to use its own instead.

        Initializing it with merge_extra=True will instead merge both
        maps when logging, the individual call extra taking precedence
        over the LoggerAdapter instance extra.

        .. versionchanged:: 3.13
           The *merge_extra* argument was added.
        """
        self.logger = logger
        self.extra = extra
        self.merge_extra = merge_extra

    def process(self, msg, kwargs):
        """
        Process the logging message and keyword arguments passed in to
        a logging call to insert contextual information. You can either
        manipulate the message itself, the keyword args or both. Return
        the message and kwargs modified (or not) to suit your needs.

        Normally, you'll only need to override this one method in a
        LoggerAdapter subclass for your specific needs.
        """
        if self.merge_extra and "extra" in kwargs:
            # Per-call extras win over the adapter's own on key clashes.
            merged = {**self.extra, **kwargs["extra"]}
            kwargs["extra"] = merged
        else:
            kwargs["extra"] = self.extra
        return msg, kwargs

    #
    # Boilerplate convenience methods
    #
    def debug(self, msg, *args, **kwargs):
        """
        Delegate a debug call to the underlying logger.
        """
        self.log(DEBUG, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """
        Delegate an info call to the underlying logger.
        """
        self.log(INFO, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """
        Delegate a warning call to the underlying logger.
        """
        self.log(WARNING, msg, *args, **kwargs)

    def warn(self, msg, *args, **kwargs):
        # Deprecated alias; stacklevel 2 points the warning at the caller.
        warnings.warn("The 'warn' method is deprecated, "
                      "use 'warning' instead", DeprecationWarning, 2)
        self.warning(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """
        Delegate an error call to the underlying logger.
        """
        self.log(ERROR, msg, *args, **kwargs)

    def exception(self, msg, *args, exc_info=True, **kwargs):
        """
        Delegate an exception call to the underlying logger.
        """
        self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """
        Delegate a critical call to the underlying logger.
        """
        self.log(CRITICAL, msg, *args, **kwargs)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegate a log call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        if not self.isEnabledFor(level):
            return
        msg, kwargs = self.process(msg, kwargs)
        self.logger.log(level, msg, *args, **kwargs)

    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        return self.logger.isEnabledFor(level)

    def setLevel(self, level):
        """
        Set the specified level on the underlying logger.
        """
        self.logger.setLevel(level)

    def getEffectiveLevel(self):
        """
        Get the effective level for the underlying logger.
        """
        return self.logger.getEffectiveLevel()

    def hasHandlers(self):
        """
        See if the underlying logger has any handlers.
        """
        return self.logger.hasHandlers()

    def _log(self, level, msg, args, **kwargs):
        """
        Low-level log implementation, proxied to allow nested logger adapters.
        """
        return self.logger._log(level, msg, args, **kwargs)

    @property
    def manager(self):
        return self.logger.manager

    @manager.setter
    def manager(self, value):
        self.logger.manager = value

    @property
    def name(self):
        return self.logger.name

    def __repr__(self):
        wrapped = self.logger
        level = getLevelName(wrapped.getEffectiveLevel())
        return '<%s %s (%s)>' % (self.__class__.__name__, wrapped.name, level)

    __class_getitem__ = classmethod(GenericAlias)
An adapter for loggers which makes it easier to specify contextual information in logging output.
def __init__(self, logger, extra=None, merge_extra=False):
    """
    Initialize the adapter with a logger and a dict-like object which
    provides contextual information. This constructor signature allows
    easy stacking of LoggerAdapters, if so desired.

    You can effectively pass keyword arguments as shown in the
    following example:

    adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))

    By default, LoggerAdapter objects will drop the "extra" argument
    passed on the individual log calls to use its own instead.

    Initializing it with merge_extra=True will instead merge both
    maps when logging, the individual call extra taking precedence
    over the LoggerAdapter instance extra

    .. versionchanged:: 3.13
       The *merge_extra* argument was added.
    """
    self.logger = logger            # the wrapped Logger (or another adapter)
    self.extra = extra              # contextual mapping injected by process()
    self.merge_extra = merge_extra  # see docstring for merge semantics
Initialize the adapter with a logger and a dict-like object which provides contextual information. This constructor signature allows easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
By default, LoggerAdapter objects will drop the "extra" argument passed on the individual log calls to use its own instead.
Initializing it with merge_extra=True will instead merge both maps when logging, with the individual call's extra taking precedence over the LoggerAdapter instance's extra.
Changed in version 3.13: The merge_extra argument was added.
def process(self, msg, kwargs):
    """
    Process the logging message and keyword arguments passed in to
    a logging call to insert contextual information. You can either
    manipulate the message itself, the keyword args or both. Return
    the message and kwargs modified (or not) to suit your needs.

    Normally, you'll only need to override this one method in a
    LoggerAdapter subclass for your specific needs.
    """
    if self.merge_extra and "extra" in kwargs:
        # Per-call extras win over the adapter's own on key clashes.
        kwargs["extra"] = {**self.extra, **kwargs["extra"]}
    else:
        kwargs["extra"] = self.extra
    return msg, kwargs
Process the logging message and keyword arguments passed in to a logging call to insert contextual information. You can either manipulate the message itself, the keyword args or both. Return the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a LoggerAdapter subclass for your specific needs.
def debug(self, msg, *args, **kwargs):
    """
    Delegate a debug call to the underlying logger.
    """
    # Routed through self.log() so process() can inject adapter context.
    self.log(DEBUG, msg, *args, **kwargs)
Delegate a debug call to the underlying logger.
def info(self, msg, *args, **kwargs):
    """
    Delegate an info call to the underlying logger.
    """
    # Routed through self.log() so process() can inject adapter context.
    self.log(INFO, msg, *args, **kwargs)
Delegate an info call to the underlying logger.
def warning(self, msg, *args, **kwargs):
    """
    Delegate a warning call to the underlying logger.
    """
    # Routed through self.log() so process() can inject adapter context.
    self.log(WARNING, msg, *args, **kwargs)
Delegate a warning call to the underlying logger.
def error(self, msg, *args, **kwargs):
    """
    Delegate an error call to the underlying logger.
    """
    # Routed through self.log() so process() can inject adapter context.
    self.log(ERROR, msg, *args, **kwargs)
Delegate an error call to the underlying logger.
def exception(self, msg, *args, exc_info=True, **kwargs):
    """
    Delegate an exception call to the underlying logger.
    """
    # exc_info defaults to True so the active exception is attached.
    self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs)
Delegate an exception call to the underlying logger.
def critical(self, msg, *args, **kwargs):
    """
    Delegate a critical call to the underlying logger.
    """
    # Routed through self.log() so process() can inject adapter context.
    self.log(CRITICAL, msg, *args, **kwargs)
Delegate a critical call to the underlying logger.
def log(self, level, msg, *args, **kwargs):
    """
    Delegate a log call to the underlying logger, after adding
    contextual information from this adapter instance.
    """
    if self.isEnabledFor(level):
        # process() may rewrite both the message and the kwargs.
        msg, kwargs = self.process(msg, kwargs)
        self.logger.log(level, msg, *args, **kwargs)
Delegate a log call to the underlying logger, after adding contextual information from this adapter instance.
def isEnabledFor(self, level):
    """
    Is this logger enabled for level 'level'?
    """
    # Pure delegation; the wrapped logger owns the level logic.
    return self.logger.isEnabledFor(level)
Is this logger enabled for level 'level'?
def setLevel(self, level):
    """
    Set the specified level on the underlying logger.
    """
    # Pure delegation; the adapter holds no level of its own.
    self.logger.setLevel(level)
Set the specified level on the underlying logger.
def getEffectiveLevel(self):
    """
    Get the effective level for the underlying logger.
    """
    # Pure delegation; the adapter holds no level of its own.
    return self.logger.getEffectiveLevel()
Get the effective level for the underlying logger.
class NullHandler(Handler):
    """
    This handler does nothing. It's intended to be used to avoid the
    "No handlers could be found for logger XXX" one-off warning. This is
    important for library code, which may contain code to log events. If a user
    of the library does not configure logging, the one-off warning might be
    produced; to avoid this, the library developer simply needs to instantiate
    a NullHandler and add it to the top-level logger of the library module or
    package.
    """
    def handle(self, record):
        """Stub."""

    def emit(self, record):
        """Stub."""

    def createLock(self):
        # A no-op handler needs no synchronization, so no lock is allocated.
        self.lock = None

    def _at_fork_reinit(self):
        # Nothing to reinitialize after fork: createLock() set no real lock.
        pass
This handler does nothing. It's intended to be used to avoid the "No handlers could be found for logger XXX" one-off warning. This is important for library code, which may contain code to log events. If a user of the library does not configure logging, the one-off warning might be produced; to avoid this, the library developer simply needs to instantiate a NullHandler and add it to the top-level logger of the library module or package.
class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """

    # Appended to every formatted message in emit().
    terminator = '\n'

    def __init__(self, stream=None):
        """
        Initialize the handler.

        If stream is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if stream is None:
            stream = sys.stderr
        self.stream = stream

    def flush(self):
        """
        Flushes the stream.
        """
        # hasattr guard: the stream may be any writable object, with or
        # without a flush() method.
        with self.lock:
            if self.stream and hasattr(self.stream, "flush"):
                self.stream.flush()

    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            # issue 35046: merged two stream.writes into one.
            stream.write(msg + self.terminator)
            self.flush()
        except RecursionError:  # See issue 36272
            raise
        except Exception:
            # Delegate error reporting so logging failures never propagate
            # into application code.
            self.handleError(record)

    def setStream(self, stream):
        """
        Sets the StreamHandler's stream to the specified value,
        if it is different.

        Returns the old stream, if the stream was changed, or None
        if it wasn't.
        """
        # NOTE(review): the identity check runs outside self.lock; two
        # concurrent setStream() calls could both pass it - confirm this
        # is acceptable for the intended usage.
        if stream is self.stream:
            result = None
        else:
            result = self.stream
            with self.lock:
                self.flush()
                self.stream = stream
        return result

    def __repr__(self):
        level = getLevelName(self.level)
        name = getattr(self.stream, 'name', '')
        # bpo-36015: name can be an int
        name = str(name)
        if name:
            name += ' '
        return '<%s %s(%s)>' % (self.__class__.__name__, name, level)

    __class_getitem__ = classmethod(GenericAlias)
A handler class which writes logging records, appropriately formatted, to a stream. Note that this class does not close the stream, as sys.stdout or sys.stderr may be used.
1120 def __init__(self, stream=None): 1121 """ 1122 Initialize the handler. 1123 1124 If stream is not specified, sys.stderr is used. 1125 """ 1126 Handler.__init__(self) 1127 if stream is None: 1128 stream = sys.stderr 1129 self.stream = stream
Initialize the handler.
If stream is not specified, sys.stderr is used.
1131 def flush(self): 1132 """ 1133 Flushes the stream. 1134 """ 1135 with self.lock: 1136 if self.stream and hasattr(self.stream, "flush"): 1137 self.stream.flush()
Flushes the stream.
1139 def emit(self, record): 1140 """ 1141 Emit a record. 1142 1143 If a formatter is specified, it is used to format the record. 1144 The record is then written to the stream with a trailing newline. If 1145 exception information is present, it is formatted using 1146 traceback.print_exception and appended to the stream. If the stream 1147 has an 'encoding' attribute, it is used to determine how to do the 1148 output to the stream. 1149 """ 1150 try: 1151 msg = self.format(record) 1152 stream = self.stream 1153 # issue 35046: merged two stream.writes into one. 1154 stream.write(msg + self.terminator) 1155 self.flush() 1156 except RecursionError: # See issue 36272 1157 raise 1158 except Exception: 1159 self.handleError(record)
Emit a record.
If a formatter is specified, it is used to format the record. The record is then written to the stream with a trailing newline. If exception information is present, it is formatted using traceback.print_exception and appended to the stream. If the stream has an 'encoding' attribute, it is used to determine how to do the output to the stream.
1161 def setStream(self, stream): 1162 """ 1163 Sets the StreamHandler's stream to the specified value, 1164 if it is different. 1165 1166 Returns the old stream, if the stream was changed, or None 1167 if it wasn't. 1168 """ 1169 if stream is self.stream: 1170 result = None 1171 else: 1172 result = self.stream 1173 with self.lock: 1174 self.flush() 1175 self.stream = stream 1176 return result
Sets the StreamHandler's stream to the specified value, if it is different.
Returns the old stream, if the stream was changed, or None if it wasn't.
157def addLevelName(level, levelName): 158 """ 159 Associate 'levelName' with 'level'. 160 161 This is used when converting levels to text during message formatting. 162 """ 163 with _lock: 164 _levelToName[level] = levelName 165 _nameToLevel[levelName] = level
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
def basicConfig(**kwargs):
    """
    Do basic configuration for the logging system.

    This function does nothing if the root logger already has handlers
    configured, unless the keyword argument *force* is set to ``True``.
    It is a convenience method intended for use by simple scripts
    to do one-shot configuration of the logging package.

    The default behaviour is to create a StreamHandler which writes to
    sys.stderr, set a formatter using the BASIC_FORMAT format string, and
    add the handler to the root logger.

    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.

    filename  Specifies that a FileHandler be created, using the specified
              filename, rather than a StreamHandler.
    filemode  Specifies the mode to open the file, if filename is specified
              (if filemode is unspecified, it defaults to 'a').
    format    Use the specified format string for the handler.
    datefmt   Use the specified date/time format.
    style     If a format string is specified, use this to specify the
              type of format string (possible values '%', '{', '$', for
              %-formatting, :meth:`str.format` and :class:`string.Template`
              - defaults to '%').
    level     Set the root logger level to the specified level.
    stream    Use the specified stream to initialize the StreamHandler. Note
              that this argument is incompatible with 'filename' - if both
              are present, 'stream' is ignored.
    handlers  If specified, this should be an iterable of already created
              handlers, which will be added to the root logger. Any handler
              in the list which does not have a formatter assigned will be
              assigned the formatter created in this function.
    force     If this keyword is specified as true, any existing handlers
              attached to the root logger are removed and closed, before
              carrying out the configuration as specified by the other
              arguments.
    encoding  If specified together with a filename, this encoding is passed to
              the created FileHandler, causing it to be used when the file is
              opened.
    errors    If specified together with a filename, this value is passed to the
              created FileHandler, causing it to be used when the file is
              opened in text mode. If not specified, the default value is
              `backslashreplace`.

    Note that you could specify a stream created using open(filename, mode)
    rather than passing the filename and mode in. However, it should be
    remembered that StreamHandler does not close its stream (since it may be
    using sys.stdout or sys.stderr), whereas FileHandler closes its stream
    when the handler is closed.

    .. versionchanged:: 3.2
       Added the ``style`` parameter.

    .. versionchanged:: 3.3
       Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
       incompatible arguments (e.g. ``handlers`` specified together with
       ``filename``/``filemode``, or ``filename``/``filemode`` specified
       together with ``stream``, or ``handlers`` specified together with
       ``stream``.

    .. versionchanged:: 3.8
       Added the ``force`` parameter.

    .. versionchanged:: 3.9
       Added the ``encoding`` and ``errors`` parameters.
    """
    # Add thread safety in case someone mistakenly calls
    # basicConfig() from multiple threads
    with _lock:
        force = kwargs.pop('force', False)
        encoding = kwargs.pop('encoding', None)
        errors = kwargs.pop('errors', 'backslashreplace')
        if force:
            # Tear down any existing configuration: detach and close every
            # handler currently attached to the root logger.
            for h in root.handlers[:]:
                root.removeHandler(h)
                h.close()
        # Only configure when the root logger has no handlers (possibly
        # because they were just removed via 'force').
        if len(root.handlers) == 0:
            handlers = kwargs.pop("handlers", None)
            # Validate mutually-exclusive keyword combinations up front.
            if handlers is None:
                if "stream" in kwargs and "filename" in kwargs:
                    raise ValueError("'stream' and 'filename' should not be "
                                     "specified together")
            else:
                if "stream" in kwargs or "filename" in kwargs:
                    raise ValueError("'stream' or 'filename' should not be "
                                     "specified together with 'handlers'")
            if handlers is None:
                # No explicit handlers: build a single one from
                # filename (FileHandler) or stream (StreamHandler).
                filename = kwargs.pop("filename", None)
                mode = kwargs.pop("filemode", 'a')
                if filename:
                    if 'b' in mode:
                        # Binary mode: text-mode 'errors' does not apply.
                        errors = None
                    else:
                        # Resolve the effective text encoding for text mode.
                        encoding = io.text_encoding(encoding)
                    h = FileHandler(filename, mode,
                                    encoding=encoding, errors=errors)
                else:
                    stream = kwargs.pop("stream", None)
                    h = StreamHandler(stream)
                handlers = [h]
            dfs = kwargs.pop("datefmt", None)
            style = kwargs.pop("style", '%')
            if style not in _STYLES:
                raise ValueError('Style must be one of: %s' % ','.join(
                                 _STYLES.keys()))
            fs = kwargs.pop("format", _STYLES[style][1])
            fmt = Formatter(fs, dfs, style)
            for h in handlers:
                # Only handlers without a formatter get the one built here.
                if h.formatter is None:
                    h.setFormatter(fmt)
                root.addHandler(h)
            level = kwargs.pop("level", None)
            if level is not None:
                root.setLevel(level)
            # Anything left in kwargs was not a recognised option.
            if kwargs:
                keys = ', '.join(kwargs.keys())
                raise ValueError('Unrecognised argument(s): %s' % keys)
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers configured, unless the keyword argument *force* is set to ``True``. It is a convenience method intended for use by simple scripts to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to sys.stderr, set a formatter using the BASIC_FORMAT format string, and add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the type of format string (possible values '%', '{', '$', for %-formatting, str.format() and string.Template respectively — defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root logger. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
force If this keyword is specified as true, any existing handlers
attached to the root logger are removed and closed, before
carrying out the configuration as specified by the other
arguments.
encoding If specified together with a filename, this encoding is passed to
the created FileHandler, causing it to be used when the file is
opened.
errors If specified together with a filename, this value is passed to the created FileHandler, causing it to be used when the file is opened in text mode. If not specified, the default value is 'backslashreplace'.
Note that you could specify a stream created using open(filename, mode) rather than passing the filename and mode in. However, it should be remembered that StreamHandler does not close its stream (since it may be using sys.stdout or sys.stderr), whereas FileHandler closes its stream when the handler is closed.
Changed in version 3.2: Added the *style* parameter.
Changed in version 3.3: Added the *handlers* parameter. A ValueError is now thrown for incompatible arguments (e.g. *handlers* specified together with *filename*/*filemode*, or *filename*/*filemode* specified together with *stream*, or *handlers* specified together with *stream*).
Changed in version 3.8: Added the *force* parameter.
Changed in version 3.9: Added the *encoding* and *errors* parameters.
2311def captureWarnings(capture): 2312 """ 2313 If capture is true, redirect all warnings to the logging package. 2314 If capture is False, ensure that warnings are not redirected to logging 2315 but to their original destinations. 2316 """ 2317 global _warnings_showwarning 2318 if capture: 2319 if _warnings_showwarning is None: 2320 _warnings_showwarning = warnings.showwarning 2321 warnings.showwarning = _showwarning 2322 else: 2323 if _warnings_showwarning is not None: 2324 warnings.showwarning = _warnings_showwarning 2325 _warnings_showwarning = None
If capture is true, redirect all warnings to the logging package. If capture is False, ensure that warnings are not redirected to logging but to their original destinations.
2139def critical(msg, *args, **kwargs): 2140 """ 2141 Log a message with severity 'CRITICAL' on the root logger. If the logger 2142 has no handlers, call basicConfig() to add a console handler with a 2143 pre-defined format. 2144 """ 2145 if len(root.handlers) == 0: 2146 basicConfig() 2147 root.critical(msg, *args, **kwargs)
Log a message with severity 'CRITICAL' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
2198def debug(msg, *args, **kwargs): 2199 """ 2200 Log a message with severity 'DEBUG' on the root logger. If the logger has 2201 no handlers, call basicConfig() to add a console handler with a pre-defined 2202 format. 2203 """ 2204 if len(root.handlers) == 0: 2205 basicConfig() 2206 root.debug(msg, *args, **kwargs)
Log a message with severity 'DEBUG' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
2218def disable(level=CRITICAL): 2219 """ 2220 Disable all logging calls of severity 'level' and below. 2221 """ 2222 root.manager.disable = level 2223 root.manager._clear_cache()
Disable all logging calls of severity 'level' and below.
2155def error(msg, *args, **kwargs): 2156 """ 2157 Log a message with severity 'ERROR' on the root logger. If the logger has 2158 no handlers, call basicConfig() to add a console handler with a pre-defined 2159 format. 2160 """ 2161 if len(root.handlers) == 0: 2162 basicConfig() 2163 root.error(msg, *args, **kwargs)
Log a message with severity 'ERROR' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
2165def exception(msg, *args, exc_info=True, **kwargs): 2166 """ 2167 Log a message with severity 'ERROR' on the root logger, with exception 2168 information. If the logger has no handlers, basicConfig() is called to add 2169 a console handler with a pre-defined format. 2170 """ 2171 error(msg, *args, exc_info=exc_info, **kwargs)
Log a message with severity 'ERROR' on the root logger, with exception information. If the logger has no handlers, basicConfig() is called to add a console handler with a pre-defined format.
2149def fatal(msg, *args, **kwargs): 2150 """ 2151 Don't use this function, use critical() instead. 2152 """ 2153 critical(msg, *args, **kwargs)
Don't use this function, use critical() instead.
130def getLevelName(level): 131 """ 132 Return the textual or numeric representation of logging level 'level'. 133 134 If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, 135 INFO, DEBUG) then you get the corresponding string. If you have 136 associated levels with names using addLevelName then the name you have 137 associated with 'level' is returned. 138 139 If a numeric value corresponding to one of the defined levels is passed 140 in, the corresponding string representation is returned. 141 142 If a string representation of the level is passed in, the corresponding 143 numeric value is returned. 144 145 If no matching numeric or string value is passed in, the string 146 'Level %s' % level is returned. 147 """ 148 # See Issues #22386, #27937 and #29220 for why it's this way 149 result = _levelToName.get(level) 150 if result is not None: 151 return result 152 result = _nameToLevel.get(level) 153 if result is not None: 154 return result 155 return "Level %s" % level
Return the textual or numeric representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, INFO, DEBUG) then you get the corresponding string. If you have associated levels with names using addLevelName then the name you have associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed in, the corresponding string representation is returned.
If a string representation of the level is passed in, the corresponding numeric value is returned.
If no matching numeric or string value is passed in, the string 'Level %s' % level is returned.
2129def getLogger(name=None): 2130 """ 2131 Return a logger with the specified name, creating it if necessary. 2132 2133 If no name is specified, return the root logger. 2134 """ 2135 if not name or isinstance(name, str) and name == root.name: 2136 return root 2137 return Logger.manager.getLogger(name)
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
1332def getLoggerClass(): 1333 """ 1334 Return the class to be used when instantiating a logger. 1335 """ 1336 return _loggerClass
Return the class to be used when instantiating a logger.
2188def info(msg, *args, **kwargs): 2189 """ 2190 Log a message with severity 'INFO' on the root logger. If the logger has 2191 no handlers, call basicConfig() to add a console handler with a pre-defined 2192 format. 2193 """ 2194 if len(root.handlers) == 0: 2195 basicConfig() 2196 root.info(msg, *args, **kwargs)
Log a message with severity 'INFO' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
2208def log(level, msg, *args, **kwargs): 2209 """ 2210 Log 'msg % args' with the integer severity 'level' on the root logger. If 2211 the logger has no handlers, call basicConfig() to add a console handler 2212 with a pre-defined format. 2213 """ 2214 if len(root.handlers) == 0: 2215 basicConfig() 2216 root.log(level, msg, *args, **kwargs)
Log 'msg % args' with the integer severity 'level' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
426def makeLogRecord(dict): 427 """ 428 Make a LogRecord whose attributes are defined by the specified dictionary, 429 This function is useful for converting a logging event received over 430 a socket connection (which is sent as a dictionary) into a LogRecord 431 instance. 432 """ 433 rv = _logRecordFactory(None, None, "", 0, "", (), None, None) 434 rv.__dict__.update(dict) 435 return rv
Make a LogRecord whose attributes are defined by the specified dictionary, This function is useful for converting a logging event received over a socket connection (which is sent as a dictionary) into a LogRecord instance.
1319def setLoggerClass(klass): 1320 """ 1321 Set the class to be used when instantiating a logger. The class should 1322 define __init__() such that only a name argument is required, and the 1323 __init__() should call Logger.__init__() 1324 """ 1325 if klass != Logger: 1326 if not issubclass(klass, Logger): 1327 raise TypeError("logger not derived from logging.Logger: " 1328 + klass.__name__) 1329 global _loggerClass 1330 _loggerClass = klass
Set the class to be used when instantiating a logger. The class should define __init__() such that only a name argument is required, and the __init__() should call Logger.__init__()
2225def shutdown(handlerList=_handlerList): 2226 """ 2227 Perform any cleanup actions in the logging system (e.g. flushing 2228 buffers). 2229 2230 Should be called at application exit. 2231 """ 2232 for wr in reversed(handlerList[:]): 2233 #errors might occur, for example, if files are locked 2234 #we just ignore them if raiseExceptions is not set 2235 try: 2236 h = wr() 2237 if h: 2238 try: 2239 h.acquire() 2240 # MemoryHandlers might not want to be flushed on close, 2241 # but circular imports prevent us scoping this to just 2242 # those handlers. hence the default to True. 2243 if getattr(h, 'flushOnClose', True): 2244 h.flush() 2245 h.close() 2246 except (OSError, ValueError): 2247 # Ignore errors which might be caused 2248 # because handlers have been closed but 2249 # references to them are still around at 2250 # application exit. 2251 pass 2252 finally: 2253 h.release() 2254 except: # ignore everything, as we're shutting down 2255 if raiseExceptions: 2256 raise 2257 #else, swallow
Perform any cleanup actions in the logging system (e.g. flushing buffers).
Should be called at application exit.
2173def warning(msg, *args, **kwargs): 2174 """ 2175 Log a message with severity 'WARNING' on the root logger. If the logger has 2176 no handlers, call basicConfig() to add a console handler with a pre-defined 2177 format. 2178 """ 2179 if len(root.handlers) == 0: 2180 basicConfig() 2181 root.warning(msg, *args, **kwargs)
Log a message with severity 'WARNING' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
419def getLogRecordFactory(): 420 """ 421 Return the factory to be used when instantiating a log record. 422 """ 423 424 return _logRecordFactory
Return the factory to be used when instantiating a log record.
409def setLogRecordFactory(factory): 410 """ 411 Set the factory to be used when instantiating a log record. 412 413 :param factory: A callable which will be called to instantiate 414 a log record. 415 """ 416 global _logRecordFactory 417 _logRecordFactory = factory
Set the factory to be used when instantiating a log record.
Parameters
- factory: A callable which will be called to instantiate a log record.
907def getHandlerByName(name): 908 """ 909 Get a handler with the specified *name*, or None if there isn't one with 910 that name. 911 """ 912 return _handlers.get(name)
Get a handler with the specified name, or None if there isn't one with that name.
915def getHandlerNames(): 916 """ 917 Return all known handler names as an immutable set. 918 """ 919 return frozenset(_handlers)
Return all known handler names as an immutable set.