python模块:logging
1 # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved. 2 # 3 # Permission to use, copy, modify, and distribute this software and its 4 # documentation for any purpose and without fee is hereby granted, 5 # provided that the above copyright notice appear in all copies and that 6 # both that copyright notice and this permission notice appear in 7 # supporting documentation, and that the name of Vinay Sajip 8 # not be used in advertising or publicity pertaining to distribution 9 # of the software without specific, written prior permission. 10 # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING 11 # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL 12 # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR 13 # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER 14 # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 15 # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 17 """ 18 Logging package for Python. Based on PEP 282 and comments thereto in 19 comp.lang.python. 20 21 Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved. 22 23 To use, simply 'import logging' and log away! 24 """ 25 26 import sys, os, time, io, traceback, warnings, weakref, collections 27 28 from string import Template 29 30 __all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR', 31 'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO', 32 'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler', 33 'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig', 34 'captureWarnings', 'critical', 'debug', 'disable', 'error', 35 'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass', 36 'info', 'log', 'makeLogRecord', 'setLoggerClass', 'shutdown', 37 'warn', 'warning', 'getLogRecordFactory', 'setLogRecordFactory', 38 'lastResort', 'raiseExceptions'] 39 40 try: 41 import threading 42 except ImportError: #pragma: no cover 43 threading = None 44 45 __author__ = "Vinay Sajip <vinay_sajip@red-dove.com>" 46 __status__ = "production" 47 # The following module attributes are no longer updated. 48 __version__ = "0.5.1.2" 49 __date__ = "07 February 2010" 50 51 #--------------------------------------------------------------------------- 52 # Miscellaneous module data 53 #--------------------------------------------------------------------------- 54 55 # 56 #_startTime is used as the base when calculating the relative time of events 57 # 58 _startTime = time.time() 59 60 # 61 #raiseExceptions is used to see if exceptions during handling should be 62 #propagated 63 # 64 raiseExceptions = True 65 66 # 67 # If you don't want threading information in the log, set this to zero 68 # 69 logThreads = True 70 71 # 72 # If you don't want multiprocessing information in the log, set this to zero 73 # 74 logMultiprocessing = True 75 76 # 77 # If you don't want process information in the log, set this to zero 78 # 79 logProcesses = True 80 81 #--------------------------------------------------------------------------- 82 # Level related stuff 83 #--------------------------------------------------------------------------- 84 # 85 # Default levels and level names, these can be replaced with any positive set 86 # of values having corresponding names. There is a pseudo-level, NOTSET, which 87 # is only really there as a lower limit for user-defined levels. Handlers and 88 # loggers are initialized with NOTSET so that they will log all messages, even 89 # at user-defined levels. 90 # 91 92 CRITICAL = 50 93 FATAL = CRITICAL 94 ERROR = 40 95 WARNING = 30 96 WARN = WARNING 97 INFO = 20 98 DEBUG = 10 99 NOTSET = 0 100 101 _levelToName = { 102 CRITICAL: 'CRITICAL', 103 ERROR: 'ERROR', 104 WARNING: 'WARNING', 105 INFO: 'INFO', 106 DEBUG: 'DEBUG', 107 NOTSET: 'NOTSET', 108 } 109 _nameToLevel = { 110 'CRITICAL': CRITICAL, 111 'FATAL': FATAL, 112 'ERROR': ERROR, 113 'WARN': WARNING, 114 'WARNING': WARNING, 115 'INFO': INFO, 116 'DEBUG': DEBUG, 117 'NOTSET': NOTSET, 118 } 119 120 def getLevelName(level): 121 """ 122 Return the textual representation of logging level 'level'. 123 124 If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, 125 INFO, DEBUG) then you get the corresponding string. If you have 126 associated levels with names using addLevelName then the name you have 127 associated with 'level' is returned. 128 129 If a numeric value corresponding to one of the defined levels is passed 130 in, the corresponding string representation is returned. 131 132 Otherwise, the string "Level %s" % level is returned. 133 """ 134 # See Issues #22386, #27937 and #29220 for why it's this way 135 result = _levelToName.get(level) 136 if result is not None: 137 return result 138 result = _nameToLevel.get(level) 139 if result is not None: 140 return result 141 return "Level %s" % level 142 143 def addLevelName(level, levelName): 144 """ 145 Associate 'levelName' with 'level'. 146 147 This is used when converting levels to text during message formatting. 148 """ 149 _acquireLock() 150 try: #unlikely to cause an exception, but you never know... 151 _levelToName[level] = levelName 152 _nameToLevel[levelName] = level 153 finally: 154 _releaseLock() 155 156 if hasattr(sys, '_getframe'): 157 currentframe = lambda: sys._getframe(3) 158 else: #pragma: no cover 159 def currentframe(): 160 """Return the frame object for the caller's stack frame.""" 161 try: 162 raise Exception 163 except Exception: 164 return sys.exc_info()[2].tb_frame.f_back 165 166 # 167 # _srcfile is used when walking the stack to check when we've got the first 168 # caller stack frame, by skipping frames whose filename is that of this 169 # module's source. It therefore should contain the filename of this module's 170 # source file. 171 # 172 # Ordinarily we would use __file__ for this, but frozen modules don't always 173 # have __file__ set, for some reason (see Issue #21736). Thus, we get the 174 # filename from a handy code object from a function defined in this module. 175 # (There's no particular reason for picking addLevelName.) 176 # 177 178 _srcfile = os.path.normcase(addLevelName.__code__.co_filename) 179 180 # _srcfile is only used in conjunction with sys._getframe(). 181 # To provide compatibility with older versions of Python, set _srcfile 182 # to None if _getframe() is not available; this value will prevent 183 # findCaller() from being called. You can also do this if you want to avoid 184 # the overhead of fetching caller information, even when _getframe() is 185 # available. 186 #if not hasattr(sys, '_getframe'): 187 # _srcfile = None 188 189 190 def _checkLevel(level): 191 if isinstance(level, int): 192 rv = level 193 elif str(level) == level: 194 if level not in _nameToLevel: 195 raise ValueError("Unknown level: %r" % level) 196 rv = _nameToLevel[level] 197 else: 198 raise TypeError("Level not an integer or a valid string: %r" % level) 199 return rv 200 201 #--------------------------------------------------------------------------- 202 # Thread-related stuff 203 #--------------------------------------------------------------------------- 204 205 # 206 #_lock is used to serialize access to shared data structures in this module. 207 #This needs to be an RLock because fileConfig() creates and configures 208 #Handlers, and so might arbitrary user threads. Since Handler code updates the 209 #shared dictionary _handlers, it needs to acquire the lock. But if configuring, 210 #the lock would already have been acquired - so we need an RLock. 211 #The same argument applies to Loggers and Manager.loggerDict. 212 # 213 if threading: 214 _lock = threading.RLock() 215 else: #pragma: no cover 216 _lock = None 217 218 219 def _acquireLock(): 220 """ 221 Acquire the module-level lock for serializing access to shared data. 222 223 This should be released with _releaseLock(). 224 """ 225 if _lock: 226 _lock.acquire() 227 228 def _releaseLock(): 229 """ 230 Release the module-level lock acquired by calling _acquireLock(). 231 """ 232 if _lock: 233 _lock.release() 234 235 #--------------------------------------------------------------------------- 236 # The logging record 237 #--------------------------------------------------------------------------- 238 239 class LogRecord(object): 240 """ 241 A LogRecord instance represents an event being logged. 242 243 LogRecord instances are created every time something is logged. They 244 contain all the information pertinent to the event being logged. The 245 main information passed in is in msg and args, which are combined 246 using str(msg) % args to create the message field of the record. The 247 record also includes information such as when the record was created, 248 the source line where the logging call was made, and any exception 249 information to be logged. 250 """ 251 def __init__(self, name, level, pathname, lineno, 252 msg, args, exc_info, func=None, sinfo=None, **kwargs): 253 """ 254 Initialize a logging record with interesting information. 255 """ 256 ct = time.time() 257 self.name = name 258 self.msg = msg 259 # 260 # The following statement allows passing of a dictionary as a sole 261 # argument, so that you can do something like 262 # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) 263 # Suggested by Stefan Behnel. 264 # Note that without the test for args[0], we get a problem because 265 # during formatting, we test to see if the arg is present using 266 # 'if self.args:'. If the event being logged is e.g. 'Value is %d' 267 # and if the passed arg fails 'if self.args:' then no formatting 268 # is done. For example, logger.warning('Value is %d', 0) would log 269 # 'Value is %d' instead of 'Value is 0'. 270 # For the use case of passing a dictionary, this should not be a 271 # problem. 272 # Issue #21172: a request was made to relax the isinstance check 273 # to hasattr(args[0], '__getitem__'). However, the docs on string 274 # formatting still seem to suggest a mapping object is required. 275 # Thus, while not removing the isinstance check, it does now look 276 # for collections.Mapping rather than, as before, dict. 277 if (args and len(args) == 1 and isinstance(args[0], collections.Mapping) 278 and args[0]): 279 args = args[0] 280 self.args = args 281 self.levelname = getLevelName(level) 282 self.levelno = level 283 self.pathname = pathname 284 try: 285 self.filename = os.path.basename(pathname) 286 self.module = os.path.splitext(self.filename)[0] 287 except (TypeError, ValueError, AttributeError): 288 self.filename = pathname 289 self.module = "Unknown module" 290 self.exc_info = exc_info 291 self.exc_text = None # used to cache the traceback text 292 self.stack_info = sinfo 293 self.lineno = lineno 294 self.funcName = func 295 self.created = ct 296 self.msecs = (ct - int(ct)) * 1000 297 self.relativeCreated = (self.created - _startTime) * 1000 298 if logThreads and threading: 299 self.thread = threading.get_ident() 300 self.threadName = threading.current_thread().name 301 else: # pragma: no cover 302 self.thread = None 303 self.threadName = None 304 if not logMultiprocessing: # pragma: no cover 305 self.processName = None 306 else: 307 self.processName = 'MainProcess' 308 mp = sys.modules.get('multiprocessing') 309 if mp is not None: 310 # Errors may occur if multiprocessing has not finished loading 311 # yet - e.g. if a custom import hook causes third-party code 312 # to run when multiprocessing calls import. See issue 8200 313 # for an example 314 try: 315 self.processName = mp.current_process().name 316 except Exception: #pragma: no cover 317 pass 318 if logProcesses and hasattr(os, 'getpid'): 319 self.process = os.getpid() 320 else: 321 self.process = None 322 323 def __str__(self): 324 return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno, 325 self.pathname, self.lineno, self.msg) 326 327 __repr__ = __str__ 328 329 def getMessage(self): 330 """ 331 Return the message for this LogRecord. 332 333 Return the message for this LogRecord after merging any user-supplied 334 arguments with the message. 335 """ 336 msg = str(self.msg) 337 if self.args: 338 msg = msg % self.args 339 return msg 340 341 # 342 # Determine which class to use when instantiating log records. 343 # 344 _logRecordFactory = LogRecord 345 346 def setLogRecordFactory(factory): 347 """ 348 Set the factory to be used when instantiating a log record. 349 350 :param factory: A callable which will be called to instantiate 351 a log record. 352 """ 353 global _logRecordFactory 354 _logRecordFactory = factory 355 356 def getLogRecordFactory(): 357 """ 358 Return the factory to be used when instantiating a log record. 359 """ 360 361 return _logRecordFactory 362 363 def makeLogRecord(dict): 364 """ 365 Make a LogRecord whose attributes are defined by the specified dictionary, 366 This function is useful for converting a logging event received over 367 a socket connection (which is sent as a dictionary) into a LogRecord 368 instance. 369 """ 370 rv = _logRecordFactory(None, None, "", 0, "", (), None, None) 371 rv.__dict__.update(dict) 372 return rv 373 374 #--------------------------------------------------------------------------- 375 # Formatter classes and functions 376 #--------------------------------------------------------------------------- 377 378 class PercentStyle(object): 379 380 default_format = '%(message)s' 381 asctime_format = '%(asctime)s' 382 asctime_search = '%(asctime)' 383 384 def __init__(self, fmt): 385 self._fmt = fmt or self.default_format 386 387 def usesTime(self): 388 return self._fmt.find(self.asctime_search) >= 0 389 390 def format(self, record): 391 return self._fmt % record.__dict__ 392 393 class StrFormatStyle(PercentStyle): 394 default_format = '{message}' 395 asctime_format = '{asctime}' 396 asctime_search = '{asctime' 397 398 def format(self, record): 399 return self._fmt.format(**record.__dict__) 400 401 402 class StringTemplateStyle(PercentStyle): 403 default_format = '${message}' 404 asctime_format = '${asctime}' 405 asctime_search = '${asctime}' 406 407 def __init__(self, fmt): 408 self._fmt = fmt or self.default_format 409 self._tpl = Template(self._fmt) 410 411 def usesTime(self): 412 fmt = self._fmt 413 return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0 414 415 def format(self, record): 416 return self._tpl.substitute(**record.__dict__) 417 418 BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" 419 420 _STYLES = { 421 '%': (PercentStyle, BASIC_FORMAT), 422 '{': (StrFormatStyle, '{levelname}:{name}:{message}'), 423 '$': (StringTemplateStyle, '${levelname}:${name}:${message}'), 424 } 425 426 class Formatter(object): 427 """ 428 Formatter instances are used to convert a LogRecord to text. 429 430 Formatters need to know how a LogRecord is constructed. They are 431 responsible for converting a LogRecord to (usually) a string which can 432 be interpreted by either a human or an external system. The base Formatter 433 allows a formatting string to be specified. If none is supplied, the 434 default value of "%s(message)" is used. 435 436 The Formatter can be initialized with a format string which makes use of 437 knowledge of the LogRecord attributes - e.g. the default value mentioned 438 above makes use of the fact that the user's message and arguments are pre- 439 formatted into a LogRecord's message attribute. Currently, the useful 440 attributes in a LogRecord are described by: 441 442 %(name)s Name of the logger (logging channel) 443 %(levelno)s Numeric logging level for the message (DEBUG, INFO, 444 WARNING, ERROR, CRITICAL) 445 %(levelname)s Text logging level for the message ("DEBUG", "INFO", 446 "WARNING", "ERROR", "CRITICAL") 447 %(pathname)s Full pathname of the source file where the logging 448 call was issued (if available) 449 %(filename)s Filename portion of pathname 450 %(module)s Module (name portion of filename) 451 %(lineno)d Source line number where the logging call was issued 452 (if available) 453 %(funcName)s Function name 454 %(created)f Time when the LogRecord was created (time.time() 455 return value) 456 %(asctime)s Textual time when the LogRecord was created 457 %(msecs)d Millisecond portion of the creation time 458 %(relativeCreated)d Time in milliseconds when the LogRecord was created, 459 relative to the time the logging module was loaded 460 (typically at application startup time) 461 %(thread)d Thread ID (if available) 462 %(threadName)s Thread name (if available) 463 %(process)d Process ID (if available) 464 %(message)s The result of record.getMessage(), computed just as 465 the record is emitted 466 """ 467 468 converter = time.localtime 469 470 def __init__(self, fmt=None, datefmt=None, style='%'): 471 """ 472 Initialize the formatter with specified format strings. 473 474 Initialize the formatter either with the specified format string, or a 475 default as described above. Allow for specialized date formatting with 476 the optional datefmt argument (if omitted, you get the ISO8601 format). 477 478 Use a style parameter of '%', '{' or '$' to specify that you want to 479 use one of %-formatting, :meth:`str.format` (``{}``) formatting or 480 :class:`string.Template` formatting in your format string. 481 482 .. versionchanged:: 3.2 483 Added the ``style`` parameter. 484 """ 485 if style not in _STYLES: 486 raise ValueError('Style must be one of: %s' % ','.join( 487 _STYLES.keys())) 488 self._style = _STYLES[style][0](fmt) 489 self._fmt = self._style._fmt 490 self.datefmt = datefmt 491 492 default_time_format = '%Y-%m-%d %H:%M:%S' 493 default_msec_format = '%s,%03d' 494 495 def formatTime(self, record, datefmt=None): 496 """ 497 Return the creation time of the specified LogRecord as formatted text. 498 499 This method should be called from format() by a formatter which 500 wants to make use of a formatted time. This method can be overridden 501 in formatters to provide for any specific requirement, but the 502 basic behaviour is as follows: if datefmt (a string) is specified, 503 it is used with time.strftime() to format the creation time of the 504 record. Otherwise, the ISO8601 format is used. The resulting 505 string is returned. This function uses a user-configurable function 506 to convert the creation time to a tuple. By default, time.localtime() 507 is used; to change this for a particular formatter instance, set the 508 'converter' attribute to a function with the same signature as 509 time.localtime() or time.gmtime(). To change it for all formatters, 510 for example if you want all logging times to be shown in GMT, 511 set the 'converter' attribute in the Formatter class. 512 """ 513 ct = self.converter(record.created) 514 if datefmt: 515 s = time.strftime(datefmt, ct) 516 else: 517 t = time.strftime(self.default_time_format, ct) 518 s = self.default_msec_format % (t, record.msecs) 519 return s 520 521 def formatException(self, ei): 522 """ 523 Format and return the specified exception information as a string. 524 525 This default implementation just uses 526 traceback.print_exception() 527 """ 528 sio = io.StringIO() 529 tb = ei[2] 530 # See issues #9427, #1553375. Commented out for now. 531 #if getattr(self, 'fullstack', False): 532 # traceback.print_stack(tb.tb_frame.f_back, file=sio) 533 traceback.print_exception(ei[0], ei[1], tb, None, sio) 534 s = sio.getvalue() 535 sio.close() 536 if s[-1:] == "\n": 537 s = s[:-1] 538 return s 539 540 def usesTime(self): 541 """ 542 Check if the format uses the creation time of the record. 543 """ 544 return self._style.usesTime() 545 546 def formatMessage(self, record): 547 return self._style.format(record) 548 549 def formatStack(self, stack_info): 550 """ 551 This method is provided as an extension point for specialized 552 formatting of stack information. 553 554 The input data is a string as returned from a call to 555 :func:`traceback.print_stack`, but with the last trailing newline 556 removed. 557 558 The base implementation just returns the value passed in. 559 """ 560 return stack_info 561 562 def format(self, record): 563 """ 564 Format the specified record as text. 565 566 The record's attribute dictionary is used as the operand to a 567 string formatting operation which yields the returned string. 568 Before formatting the dictionary, a couple of preparatory steps 569 are carried out. The message attribute of the record is computed 570 using LogRecord.getMessage(). If the formatting string uses the 571 time (as determined by a call to usesTime(), formatTime() is 572 called to format the event time. If there is exception information, 573 it is formatted using formatException() and appended to the message. 574 """ 575 record.message = record.getMessage() 576 if self.usesTime(): 577 record.asctime = self.formatTime(record, self.datefmt) 578 s = self.formatMessage(record) 579 if record.exc_info: 580 # Cache the traceback text to avoid converting it multiple times 581 # (it's constant anyway) 582 if not record.exc_text: 583 record.exc_text = self.formatException(record.exc_info) 584 if record.exc_text: 585 if s[-1:] != "\n": 586 s = s + "\n" 587 s = s + record.exc_text 588 if record.stack_info: 589 if s[-1:] != "\n": 590 s = s + "\n" 591 s = s + self.formatStack(record.stack_info) 592 return s 593 594 # 595 # The default formatter to use when no other is specified 596 # 597 _defaultFormatter = Formatter() 598 599 class BufferingFormatter(object): 600 """ 601 A formatter suitable for formatting a number of records. 602 """ 603 def __init__(self, linefmt=None): 604 """ 605 Optionally specify a formatter which will be used to format each 606 individual record. 607 """ 608 if linefmt: 609 self.linefmt = linefmt 610 else: 611 self.linefmt = _defaultFormatter 612 613 def formatHeader(self, records): 614 """ 615 Return the header string for the specified records. 616 """ 617 return "" 618 619 def formatFooter(self, records): 620 """ 621 Return the footer string for the specified records. 622 """ 623 return "" 624 625 def format(self, records): 626 """ 627 Format the specified records and return the result as a string. 628 """ 629 rv = "" 630 if len(records) > 0: 631 rv = rv + self.formatHeader(records) 632 for record in records: 633 rv = rv + self.linefmt.format(record) 634 rv = rv + self.formatFooter(records) 635 return rv 636 637 #--------------------------------------------------------------------------- 638 # Filter classes and functions 639 #--------------------------------------------------------------------------- 640 641 class Filter(object): 642 """ 643 Filter instances are used to perform arbitrary filtering of LogRecords. 644 645 Loggers and Handlers can optionally use Filter instances to filter 646 records as desired. The base filter class only allows events which are 647 below a certain point in the logger hierarchy. For example, a filter 648 initialized with "A.B" will allow events logged by loggers "A.B", 649 "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If 650 initialized with the empty string, all events are passed. 651 """ 652 def __init__(self, name=''): 653 """ 654 Initialize a filter. 655 656 Initialize with the name of the logger which, together with its 657 children, will have its events allowed through the filter. If no 658 name is specified, allow every event. 659 """ 660 self.name = name 661 self.nlen = len(name) 662 663 def filter(self, record): 664 """ 665 Determine if the specified record is to be logged. 666 667 Is the specified record to be logged? Returns 0 for no, nonzero for 668 yes. If deemed appropriate, the record may be modified in-place. 669 """ 670 if self.nlen == 0: 671 return True 672 elif self.name == record.name: 673 return True 674 elif record.name.find(self.name, 0, self.nlen) != 0: 675 return False 676 return (record.name[self.nlen] == ".") 677 678 class Filterer(object): 679 """ 680 A base class for loggers and handlers which allows them to share 681 common code. 682 """ 683 def __init__(self): 684 """ 685 Initialize the list of filters to be an empty list. 686 """ 687 self.filters = [] 688 689 def addFilter(self, filter): 690 """ 691 Add the specified filter to this handler. 692 """ 693 if not (filter in self.filters): 694 self.filters.append(filter) 695 696 def removeFilter(self, filter): 697 """ 698 Remove the specified filter from this handler. 699 """ 700 if filter in self.filters: 701 self.filters.remove(filter) 702 703 def filter(self, record): 704 """ 705 Determine if a record is loggable by consulting all the filters. 706 707 The default is to allow the record to be logged; any filter can veto 708 this and the record is then dropped. Returns a zero value if a record 709 is to be dropped, else non-zero. 710 711 .. versionchanged:: 3.2 712 713 Allow filters to be just callables. 714 """ 715 rv = True 716 for f in self.filters: 717 if hasattr(f, 'filter'): 718 result = f.filter(record) 719 else: 720 result = f(record) # assume callable - will raise if not 721 if not result: 722 rv = False 723 break 724 return rv 725 726 #--------------------------------------------------------------------------- 727 # Handler classes and functions 728 #--------------------------------------------------------------------------- 729 730 _handlers = weakref.WeakValueDictionary() #map of handler names to handlers 731 _handlerList = [] # added to allow handlers to be removed in reverse of order initialized 732 733 def _removeHandlerRef(wr): 734 """ 735 Remove a handler reference from the internal cleanup list. 736 """ 737 # This function can be called during module teardown, when globals are 738 # set to None. It can also be called from another thread. So we need to 739 # pre-emptively grab the necessary globals and check if they're None, 740 # to prevent race conditions and failures during interpreter shutdown. 741 acquire, release, handlers = _acquireLock, _releaseLock, _handlerList 742 if acquire and release and handlers: 743 acquire() 744 try: 745 if wr in handlers: 746 handlers.remove(wr) 747 finally: 748 release() 749 750 def _addHandlerRef(handler): 751 """ 752 Add a handler to the internal cleanup list using a weak reference. 753 """ 754 _acquireLock() 755 try: 756 _handlerList.append(weakref.ref(handler, _removeHandlerRef)) 757 finally: 758 _releaseLock() 759 760 class Handler(Filterer): 761 """ 762 Handler instances dispatch logging events to specific destinations. 763 764 The base handler class. Acts as a placeholder which defines the Handler 765 interface. Handlers can optionally use Formatter instances to format 766 records as desired. By default, no formatter is specified; in this case, 767 the 'raw' message as determined by record.message is logged. 768 """ 769 def __init__(self, level=NOTSET): 770 """ 771 Initializes the instance - basically setting the formatter to None 772 and the filter list to empty. 773 """ 774 Filterer.__init__(self) 775 self._name = None 776 self.level = _checkLevel(level) 777 self.formatter = None 778 # Add the handler to the global _handlerList (for cleanup on shutdown) 779 _addHandlerRef(self) 780 self.createLock() 781 782 def get_name(self): 783 return self._name 784 785 def set_name(self, name): 786 _acquireLock() 787 try: 788 if self._name in _handlers: 789 del _handlers[self._name] 790 self._name = name 791 if name: 792 _handlers[name] = self 793 finally: 794 _releaseLock() 795 796 name = property(get_name, set_name) 797 798 def createLock(self): 799 """ 800 Acquire a thread lock for serializing access to the underlying I/O. 801 """ 802 if threading: 803 self.lock = threading.RLock() 804 else: #pragma: no cover 805 self.lock = None 806 807 def acquire(self): 808 """ 809 Acquire the I/O thread lock. 810 """ 811 if self.lock: 812 self.lock.acquire() 813 814 def release(self): 815 """ 816 Release the I/O thread lock. 817 """ 818 if self.lock: 819 self.lock.release() 820 821 def setLevel(self, level): 822 """ 823 Set the logging level of this handler. level must be an int or a str. 824 """ 825 self.level = _checkLevel(level) 826 827 def format(self, record): 828 """ 829 Format the specified record. 830 831 If a formatter is set, use it. Otherwise, use the default formatter 832 for the module. 833 """ 834 if self.formatter: 835 fmt = self.formatter 836 else: 837 fmt = _defaultFormatter 838 return fmt.format(record) 839 840 def emit(self, record): 841 """ 842 Do whatever it takes to actually log the specified logging record. 843 844 This version is intended to be implemented by subclasses and so 845 raises a NotImplementedError. 846 """ 847 raise NotImplementedError('emit must be implemented ' 848 'by Handler subclasses') 849 850 def handle(self, record): 851 """ 852 Conditionally emit the specified logging record. 853 854 Emission depends on filters which may have been added to the handler. 855 Wrap the actual emission of the record with acquisition/release of 856 the I/O thread lock. Returns whether the filter passed the record for 857 emission. 858 """ 859 rv = self.filter(record) 860 if rv: 861 self.acquire() 862 try: 863 self.emit(record) 864 finally: 865 self.release() 866 return rv 867 868 def setFormatter(self, fmt): 869 """ 870 Set the formatter for this handler. 871 """ 872 self.formatter = fmt 873 874 def flush(self): 875 """ 876 Ensure all logging output has been flushed. 877 878 This version does nothing and is intended to be implemented by 879 subclasses. 880 """ 881 pass 882 883 def close(self): 884 """ 885 Tidy up any resources used by the handler. 886 887 This version removes the handler from an internal map of handlers, 888 _handlers, which is used for handler lookup by name. Subclasses 889 should ensure that this gets called from overridden close() 890 methods. 891 """ 892 #get the module data lock, as we're updating a shared structure. 893 _acquireLock() 894 try: #unlikely to raise an exception, but you never know... 895 if self._name and self._name in _handlers: 896 del _handlers[self._name] 897 finally: 898 _releaseLock() 899 900 def handleError(self, record): 901 """ 902 Handle errors which occur during an emit() call. 903 904 This method should be called from handlers when an exception is 905 encountered during an emit() call. If raiseExceptions is false, 906 exceptions get silently ignored. This is what is mostly wanted 907 for a logging system - most users will not care about errors in 908 the logging system, they are more interested in application errors. 909 You could, however, replace this with a custom handler if you wish. 910 The record which was being processed is passed in to this method. 911 """ 912 if raiseExceptions and sys.stderr: # see issue 13807 913 t, v, tb = sys.exc_info() 914 try: 915 sys.stderr.write('--- Logging error ---\n') 916 traceback.print_exception(t, v, tb, None, sys.stderr) 917 sys.stderr.write('Call stack:\n') 918 # Walk the stack frame up until we're out of logging, 919 # so as to print the calling context. 920 frame = tb.tb_frame 921 while (frame and os.path.dirname(frame.f_code.co_filename) == 922 __path__[0]): 923 frame = frame.f_back 924 if frame: 925 traceback.print_stack(frame, file=sys.stderr) 926 else: 927 # couldn't find the right stack frame, for some reason 928 sys.stderr.write('Logged from file %s, line %s\n' % ( 929 record.filename, record.lineno)) 930 # Issue 18671: output logging message and arguments 931 try: 932 sys.stderr.write('Message: %r\n' 933 'Arguments: %s\n' % (record.msg, 934 record.args)) 935 except Exception: 936 sys.stderr.write('Unable to print the message and arguments' 937 ' - possible formatting error.\nUse the' 938 ' traceback above to help find the error.\n' 939 ) 940 except OSError: #pragma: no cover 941 pass # see issue 5971 942 finally: 943 del t, v, tb 944 945 def __repr__(self): 946 level = getLevelName(self.level) 947 return '<%s (%s)>' % (self.__class__.__name__, level) 948 949 class StreamHandler(Handler): 950 """ 951 A handler class which writes logging records, appropriately formatted, 952 to a stream. Note that this class does not close the stream, as 953 sys.stdout or sys.stderr may be used. 954 """ 955 956 terminator = '\n' 957 958 def __init__(self, stream=None): 959 """ 960 Initialize the handler. 961 962 If stream is not specified, sys.stderr is used. 963 """ 964 Handler.__init__(self) 965 if stream is None: 966 stream = sys.stderr 967 self.stream = stream 968 969 def flush(self): 970 """ 971 Flushes the stream. 972 """ 973 self.acquire() 974 try: 975 if self.stream and hasattr(self.stream, "flush"): 976 self.stream.flush() 977 finally: 978 self.release() 979 980 def emit(self, record): 981 """ 982 Emit a record. 983 984 If a formatter is specified, it is used to format the record. 985 The record is then written to the stream with a trailing newline. If 986 exception information is present, it is formatted using 987 traceback.print_exception and appended to the stream. If the stream 988 has an 'encoding' attribute, it is used to determine how to do the 989 output to the stream. 990 """ 991 try: 992 msg = self.format(record) 993 stream = self.stream 994 stream.write(msg) 995 stream.write(self.terminator) 996 self.flush() 997 except Exception: 998 self.handleError(record) 999 1000 def __repr__(self): 1001 level = getLevelName(self.level) 1002 name = getattr(self.stream, 'name', '') 1003 if name: 1004 name += ' ' 1005 return '<%s %s(%s)>' % (self.__class__.__name__, name, level) 1006 1007 1008 class FileHandler(StreamHandler): 1009 """ 1010 A handler class which writes formatted logging records to disk files. 1011 """ 1012 def __init__(self, filename, mode='a', encoding=None, delay=False): 1013 """ 1014 Open the specified file and use it as the stream for logging. 1015 """ 1016 # Issue #27493: add support for Path objects to be passed in 1017 filename = os.fspath(filename) 1018 #keep the absolute path, otherwise derived classes which use this 1019 #may come a cropper when the current directory changes 1020 self.baseFilename = os.path.abspath(filename) 1021 self.mode = mode 1022 self.encoding = encoding 1023 self.delay = delay 1024 if delay: 1025 #We don't open the stream, but we still need to call the 1026 #Handler constructor to set level, formatter, lock etc. 1027 Handler.__init__(self) 1028 self.stream = None 1029 else: 1030 StreamHandler.__init__(self, self._open()) 1031 1032 def close(self): 1033 """ 1034 Closes the stream. 1035 """ 1036 self.acquire() 1037 try: 1038 try: 1039 if self.stream: 1040 try: 1041 self.flush() 1042 finally: 1043 stream = self.stream 1044 self.stream = None 1045 if hasattr(stream, "close"): 1046 stream.close() 1047 finally: 1048 # Issue #19523: call unconditionally to 1049 # prevent a handler leak when delay is set 1050 StreamHandler.close(self) 1051 finally: 1052 self.release() 1053 1054 def _open(self): 1055 """ 1056 Open the current base file with the (original) mode and encoding. 1057 Return the resulting stream. 1058 """ 1059 return open(self.baseFilename, self.mode, encoding=self.encoding) 1060 1061 def emit(self, record): 1062 """ 1063 Emit a record. 1064 1065 If the stream was not opened because 'delay' was specified in the 1066 constructor, open it before calling the superclass's emit. 1067 """ 1068 if self.stream is None: 1069 self.stream = self._open() 1070 StreamHandler.emit(self, record) 1071 1072 def __repr__(self): 1073 level = getLevelName(self.level) 1074 return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level) 1075 1076 1077 class _StderrHandler(StreamHandler): 1078 """ 1079 This class is like a StreamHandler using sys.stderr, but always uses 1080 whatever sys.stderr is currently set to rather than the value of 1081 sys.stderr at handler construction time. 1082 """ 1083 def __init__(self, level=NOTSET): 1084 """ 1085 Initialize the handler. 1086 """ 1087 Handler.__init__(self, level) 1088 1089 @property 1090 def stream(self): 1091 return sys.stderr 1092 1093 1094 _defaultLastResort = _StderrHandler(WARNING) 1095 lastResort = _defaultLastResort 1096 1097 #--------------------------------------------------------------------------- 1098 # Manager classes and functions 1099 #--------------------------------------------------------------------------- 1100 1101 class PlaceHolder(object): 1102 """ 1103 PlaceHolder instances are used in the Manager logger hierarchy to take 1104 the place of nodes for which no loggers have been defined. This class is 1105 intended for internal use only and not as part of the public API. 1106 """ 1107 def __init__(self, alogger): 1108 """ 1109 Initialize with the specified logger being a child of this placeholder. 1110 """ 1111 self.loggerMap = { alogger : None } 1112 1113 def append(self, alogger): 1114 """ 1115 Add the specified logger as a child of this placeholder. 1116 """ 1117 if alogger not in self.loggerMap: 1118 self.loggerMap[alogger] = None 1119 1120 # 1121 # Determine which class to use when instantiating loggers. 1122 # 1123 1124 def setLoggerClass(klass): 1125 """ 1126 Set the class to be used when instantiating a logger. The class should 1127 define __init__() such that only a name argument is required, and the 1128 __init__() should call Logger.__init__() 1129 """ 1130 if klass != Logger: 1131 if not issubclass(klass, Logger): 1132 raise TypeError("logger not derived from logging.Logger: " 1133 + klass.__name__) 1134 global _loggerClass 1135 _loggerClass = klass 1136 1137 def getLoggerClass(): 1138 """ 1139 Return the class to be used when instantiating a logger. 1140 """ 1141 return _loggerClass 1142 1143 class Manager(object): 1144 """ 1145 There is [under normal circumstances] just one Manager instance, which 1146 holds the hierarchy of loggers. 1147 """ 1148 def __init__(self, rootnode): 1149 """ 1150 Initialize the manager with the root node of the logger hierarchy. 1151 """ 1152 self.root = rootnode 1153 self.disable = 0 1154 self.emittedNoHandlerWarning = False 1155 self.loggerDict = {} 1156 self.loggerClass = None 1157 self.logRecordFactory = None 1158 1159 def getLogger(self, name): 1160 """ 1161 Get a logger with the specified name (channel name), creating it 1162 if it doesn't yet exist. This name is a dot-separated hierarchical 1163 name, such as "a", "a.b", "a.b.c" or similar. 1164 1165 If a PlaceHolder existed for the specified name [i.e. the logger 1166 didn't exist but a child of it did], replace it with the created 1167 logger and fix up the parent/child references which pointed to the 1168 placeholder to now point to the logger. 1169 """ 1170 rv = None 1171 if not isinstance(name, str): 1172 raise TypeError('A logger name must be a string') 1173 _acquireLock() 1174 try: 1175 if name in self.loggerDict: 1176 rv = self.loggerDict[name] 1177 if isinstance(rv, PlaceHolder): 1178 ph = rv 1179 rv = (self.loggerClass or _loggerClass)(name) 1180 rv.manager = self 1181 self.loggerDict[name] = rv 1182 self._fixupChildren(ph, rv) 1183 self._fixupParents(rv) 1184 else: 1185 rv = (self.loggerClass or _loggerClass)(name) 1186 rv.manager = self 1187 self.loggerDict[name] = rv 1188 self._fixupParents(rv) 1189 finally: 1190 _releaseLock() 1191 return rv 1192 1193 def setLoggerClass(self, klass): 1194 """ 1195 Set the class to be used when instantiating a logger with this Manager. 1196 """ 1197 if klass != Logger: 1198 if not issubclass(klass, Logger): 1199 raise TypeError("logger not derived from logging.Logger: " 1200 + klass.__name__) 1201 self.loggerClass = klass 1202 1203 def setLogRecordFactory(self, factory): 1204 """ 1205 Set the factory to be used when instantiating a log record with this 1206 Manager. 1207 """ 1208 self.logRecordFactory = factory 1209 1210 def _fixupParents(self, alogger): 1211 """ 1212 Ensure that there are either loggers or placeholders all the way 1213 from the specified logger to the root of the logger hierarchy. 1214 """ 1215 name = alogger.name 1216 i = name.rfind(".") 1217 rv = None 1218 while (i > 0) and not rv: 1219 substr = name[:i] 1220 if substr not in self.loggerDict: 1221 self.loggerDict[substr] = PlaceHolder(alogger) 1222 else: 1223 obj = self.loggerDict[substr] 1224 if isinstance(obj, Logger): 1225 rv = obj 1226 else: 1227 assert isinstance(obj, PlaceHolder) 1228 obj.append(alogger) 1229 i = name.rfind(".", 0, i - 1) 1230 if not rv: 1231 rv = self.root 1232 alogger.parent = rv 1233 1234 def _fixupChildren(self, ph, alogger): 1235 """ 1236 Ensure that children of the placeholder ph are connected to the 1237 specified logger. 1238 """ 1239 name = alogger.name 1240 namelen = len(name) 1241 for c in ph.loggerMap.keys(): 1242 #The if means ... if not c.parent.name.startswith(nm) 1243 if c.parent.name[:namelen] != name: 1244 alogger.parent = c.parent 1245 c.parent = alogger 1246 1247 #--------------------------------------------------------------------------- 1248 # Logger classes and functions 1249 #--------------------------------------------------------------------------- 1250 1251 class Logger(Filterer): 1252 """ 1253 Instances of the Logger class represent a single logging channel. A 1254 "logging channel" indicates an area of an application. Exactly how an 1255 "area" is defined is up to the application developer. Since an 1256 application can have any number of areas, logging channels are identified 1257 by a unique string. Application areas can be nested (e.g. an area 1258 of "input processing" might include sub-areas "read CSV files", "read 1259 XLS files" and "read Gnumeric files"). To cater for this natural nesting, 1260 channel names are organized into a namespace hierarchy where levels are 1261 separated by periods, much like the Java or Python package namespace. So 1262 in the instance given above, channel names might be "input" for the upper 1263 level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. 1264 There is no arbitrary limit to the depth of nesting. 1265 """ 1266 def __init__(self, name, level=NOTSET): 1267 """ 1268 Initialize the logger with a name and an optional level. 1269 """ 1270 Filterer.__init__(self) 1271 self.name = name 1272 self.level = _checkLevel(level) 1273 self.parent = None 1274 self.propagate = True 1275 self.handlers = [] 1276 self.disabled = False 1277 1278 def setLevel(self, level): 1279 """ 1280 Set the logging level of this logger. level must be an int or a str. 1281 """ 1282 self.level = _checkLevel(level) 1283 1284 def debug(self, msg, *args, **kwargs): 1285 """ 1286 Log 'msg % args' with severity 'DEBUG'. 1287 1288 To pass exception information, use the keyword argument exc_info with 1289 a true value, e.g. 1290 1291 logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) 1292 """ 1293 if self.isEnabledFor(DEBUG): 1294 self._log(DEBUG, msg, args, **kwargs) 1295 1296 def info(self, msg, *args, **kwargs): 1297 """ 1298 Log 'msg % args' with severity 'INFO'. 1299 1300 To pass exception information, use the keyword argument exc_info with 1301 a true value, e.g. 1302 1303 logger.info("Houston, we have a %s", "interesting problem", exc_info=1) 1304 """ 1305 if self.isEnabledFor(INFO): 1306 self._log(INFO, msg, args, **kwargs) 1307 1308 def warning(self, msg, *args, **kwargs): 1309 """ 1310 Log 'msg % args' with severity 'WARNING'. 1311 1312 To pass exception information, use the keyword argument exc_info with 1313 a true value, e.g. 1314 1315 logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) 1316 """ 1317 if self.isEnabledFor(WARNING): 1318 self._log(WARNING, msg, args, **kwargs) 1319 1320 def warn(self, msg, *args, **kwargs): 1321 warnings.warn("The 'warn' method is deprecated, " 1322 "use 'warning' instead", DeprecationWarning, 2) 1323 self.warning(msg, *args, **kwargs) 1324 1325 def error(self, msg, *args, **kwargs): 1326 """ 1327 Log 'msg % args' with severity 'ERROR'. 1328 1329 To pass exception information, use the keyword argument exc_info with 1330 a true value, e.g. 1331 1332 logger.error("Houston, we have a %s", "major problem", exc_info=1) 1333 """ 1334 if self.isEnabledFor(ERROR): 1335 self._log(ERROR, msg, args, **kwargs) 1336 1337 def exception(self, msg, *args, exc_info=True, **kwargs): 1338 """ 1339 Convenience method for logging an ERROR with exception information. 1340 """ 1341 self.error(msg, *args, exc_info=exc_info, **kwargs) 1342 1343 def critical(self, msg, *args, **kwargs): 1344 """ 1345 Log 'msg % args' with severity 'CRITICAL'. 1346 1347 To pass exception information, use the keyword argument exc_info with 1348 a true value, e.g. 1349 1350 logger.critical("Houston, we have a %s", "major disaster", exc_info=1) 1351 """ 1352 if self.isEnabledFor(CRITICAL): 1353 self._log(CRITICAL, msg, args, **kwargs) 1354 1355 fatal = critical 1356 1357 def log(self, level, msg, *args, **kwargs): 1358 """ 1359 Log 'msg % args' with the integer severity 'level'. 1360 1361 To pass exception information, use the keyword argument exc_info with 1362 a true value, e.g. 1363 1364 logger.log(level, "We have a %s", "mysterious problem", exc_info=1) 1365 """ 1366 if not isinstance(level, int): 1367 if raiseExceptions: 1368 raise TypeError("level must be an integer") 1369 else: 1370 return 1371 if self.isEnabledFor(level): 1372 self._log(level, msg, args, **kwargs) 1373 1374 def findCaller(self, stack_info=False): 1375 """ 1376 Find the stack frame of the caller so that we can note the source 1377 file name, line number and function name. 1378 """ 1379 f = currentframe() 1380 #On some versions of IronPython, currentframe() returns None if 1381 #IronPython isn't run with -X:Frames. 1382 if f is not None: 1383 f = f.f_back 1384 rv = "(unknown file)", 0, "(unknown function)", None 1385 while hasattr(f, "f_code"): 1386 co = f.f_code 1387 filename = os.path.normcase(co.co_filename) 1388 if filename == _srcfile: 1389 f = f.f_back 1390 continue 1391 sinfo = None 1392 if stack_info: 1393 sio = io.StringIO() 1394 sio.write('Stack (most recent call last):\n') 1395 traceback.print_stack(f, file=sio) 1396 sinfo = sio.getvalue() 1397 if sinfo[-1] == '\n': 1398 sinfo = sinfo[:-1] 1399 sio.close() 1400 rv = (co.co_filename, f.f_lineno, co.co_name, sinfo) 1401 break 1402 return rv 1403 1404 def makeRecord(self, name, level, fn, lno, msg, args, exc_info, 1405 func=None, extra=None, sinfo=None): 1406 """ 1407 A factory method which can be overridden in subclasses to create 1408 specialized LogRecords. 1409 """ 1410 rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func, 1411 sinfo) 1412 if extra is not None: 1413 for key in extra: 1414 if (key in ["message", "asctime"]) or (key in rv.__dict__): 1415 raise KeyError("Attempt to overwrite %r in LogRecord" % key) 1416 rv.__dict__[key] = extra[key] 1417 return rv 1418 1419 def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False): 1420 """ 1421 Low-level logging routine which creates a LogRecord and then calls 1422 all the handlers of this logger to handle the record. 1423 """ 1424 sinfo = None 1425 if _srcfile: 1426 #IronPython doesn't track Python frames, so findCaller raises an 1427 #exception on some versions of IronPython. We trap it here so that 1428 #IronPython can use logging. 1429 try: 1430 fn, lno, func, sinfo = self.findCaller(stack_info) 1431 except ValueError: # pragma: no cover 1432 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1433 else: # pragma: no cover 1434 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1435 if exc_info: 1436 if isinstance(exc_info, BaseException): 1437 exc_info = (type(exc_info), exc_info, exc_info.__traceback__) 1438 elif not isinstance(exc_info, tuple): 1439 exc_info = sys.exc_info() 1440 record = self.makeRecord(self.name, level, fn, lno, msg, args, 1441 exc_info, func, extra, sinfo) 1442 self.handle(record) 1443 1444 def handle(self, record): 1445 """ 1446 Call the handlers for the specified record. 1447 1448 This method is used for unpickled records received from a socket, as 1449 well as those created locally. Logger-level filtering is applied. 1450 """ 1451 if (not self.disabled) and self.filter(record): 1452 self.callHandlers(record) 1453 1454 def addHandler(self, hdlr): 1455 """ 1456 Add the specified handler to this logger. 1457 """ 1458 _acquireLock() 1459 try: 1460 if not (hdlr in self.handlers): 1461 self.handlers.append(hdlr) 1462 finally: 1463 _releaseLock() 1464 1465 def removeHandler(self, hdlr): 1466 """ 1467 Remove the specified handler from this logger. 1468 """ 1469 _acquireLock() 1470 try: 1471 if hdlr in self.handlers: 1472 self.handlers.remove(hdlr) 1473 finally: 1474 _releaseLock() 1475 1476 def hasHandlers(self): 1477 """ 1478 See if this logger has any handlers configured. 1479 1480 Loop through all handlers for this logger and its parents in the 1481 logger hierarchy. Return True if a handler was found, else False. 1482 Stop searching up the hierarchy whenever a logger with the "propagate" 1483 attribute set to zero is found - that will be the last logger which 1484 is checked for the existence of handlers. 1485 """ 1486 c = self 1487 rv = False 1488 while c: 1489 if c.handlers: 1490 rv = True 1491 break 1492 if not c.propagate: 1493 break 1494 else: 1495 c = c.parent 1496 return rv 1497 1498 def callHandlers(self, record): 1499 """ 1500 Pass a record to all relevant handlers. 1501 1502 Loop through all handlers for this logger and its parents in the 1503 logger hierarchy. If no handler was found, output a one-off error 1504 message to sys.stderr. Stop searching up the hierarchy whenever a 1505 logger with the "propagate" attribute set to zero is found - that 1506 will be the last logger whose handlers are called. 1507 """ 1508 c = self 1509 found = 0 1510 while c: 1511 for hdlr in c.handlers: 1512 found = found + 1 1513 if record.levelno >= hdlr.level: 1514 hdlr.handle(record) 1515 if not c.propagate: 1516 c = None #break out 1517 else: 1518 c = c.parent 1519 if (found == 0): 1520 if lastResort: 1521 if record.levelno >= lastResort.level: 1522 lastResort.handle(record) 1523 elif raiseExceptions and not self.manager.emittedNoHandlerWarning: 1524 sys.stderr.write("No handlers could be found for logger" 1525 " \"%s\"\n" % self.name) 1526 self.manager.emittedNoHandlerWarning = True 1527 1528 def getEffectiveLevel(self): 1529 """ 1530 Get the effective level for this logger. 1531 1532 Loop through this logger and its parents in the logger hierarchy, 1533 looking for a non-zero logging level. Return the first one found. 1534 """ 1535 logger = self 1536 while logger: 1537 if logger.level: 1538 return logger.level 1539 logger = logger.parent 1540 return NOTSET 1541 1542 def isEnabledFor(self, level): 1543 """ 1544 Is this logger enabled for level 'level'? 1545 """ 1546 if self.manager.disable >= level: 1547 return False 1548 return level >= self.getEffectiveLevel() 1549 1550 def getChild(self, suffix): 1551 """ 1552 Get a logger which is a descendant to this one. 1553 1554 This is a convenience method, such that 1555 1556 logging.getLogger('abc').getChild('def.ghi') 1557 1558 is the same as 1559 1560 logging.getLogger('abc.def.ghi') 1561 1562 It's useful, for example, when the parent logger is named using 1563 __name__ rather than a literal string. 1564 """ 1565 if self.root is not self: 1566 suffix = '.'.join((self.name, suffix)) 1567 return self.manager.getLogger(suffix) 1568 1569 def __repr__(self): 1570 level = getLevelName(self.getEffectiveLevel()) 1571 return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level) 1572 1573 1574 class RootLogger(Logger): 1575 """ 1576 A root logger is not that different to any other logger, except that 1577 it must have a logging level and there is only one instance of it in 1578 the hierarchy. 1579 """ 1580 def __init__(self, level): 1581 """ 1582 Initialize the logger with the name "root". 1583 """ 1584 Logger.__init__(self, "root", level) 1585 1586 _loggerClass = Logger 1587 1588 class LoggerAdapter(object): 1589 """ 1590 An adapter for loggers which makes it easier to specify contextual 1591 information in logging output. 1592 """ 1593 1594 def __init__(self, logger, extra): 1595 """ 1596 Initialize the adapter with a logger and a dict-like object which 1597 provides contextual information. This constructor signature allows 1598 easy stacking of LoggerAdapters, if so desired. 1599 1600 You can effectively pass keyword arguments as shown in the 1601 following example: 1602 1603 adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) 1604 """ 1605 self.logger = logger 1606 self.extra = extra 1607 1608 def process(self, msg, kwargs): 1609 """ 1610 Process the logging message and keyword arguments passed in to 1611 a logging call to insert contextual information. You can either 1612 manipulate the message itself, the keyword args or both. Return 1613 the message and kwargs modified (or not) to suit your needs. 1614 1615 Normally, you'll only need to override this one method in a 1616 LoggerAdapter subclass for your specific needs. 1617 """ 1618 kwargs["extra"] = self.extra 1619 return msg, kwargs 1620 1621 # 1622 # Boilerplate convenience methods 1623 # 1624 def debug(self, msg, *args, **kwargs): 1625 """ 1626 Delegate a debug call to the underlying logger. 1627 """ 1628 self.log(DEBUG, msg, *args, **kwargs) 1629 1630 def info(self, msg, *args, **kwargs): 1631 """ 1632 Delegate an info call to the underlying logger. 1633 """ 1634 self.log(INFO, msg, *args, **kwargs) 1635 1636 def warning(self, msg, *args, **kwargs): 1637 """ 1638 Delegate a warning call to the underlying logger. 1639 """ 1640 self.log(WARNING, msg, *args, **kwargs) 1641 1642 def warn(self, msg, *args, **kwargs): 1643 warnings.warn("The 'warn' method is deprecated, " 1644 "use 'warning' instead", DeprecationWarning, 2) 1645 self.warning(msg, *args, **kwargs) 1646 1647 def error(self, msg, *args, **kwargs): 1648 """ 1649 Delegate an error call to the underlying logger. 1650 """ 1651 self.log(ERROR, msg, *args, **kwargs) 1652 1653 def exception(self, msg, *args, exc_info=True, **kwargs): 1654 """ 1655 Delegate an exception call to the underlying logger. 1656 """ 1657 self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs) 1658 1659 def critical(self, msg, *args, **kwargs): 1660 """ 1661 Delegate a critical call to the underlying logger. 1662 """ 1663 self.log(CRITICAL, msg, *args, **kwargs) 1664 1665 def log(self, level, msg, *args, **kwargs): 1666 """ 1667 Delegate a log call to the underlying logger, after adding 1668 contextual information from this adapter instance. 1669 """ 1670 if self.isEnabledFor(level): 1671 msg, kwargs = self.process(msg, kwargs) 1672 self.logger._log(level, msg, args, **kwargs) 1673 1674 def isEnabledFor(self, level): 1675 """ 1676 Is this logger enabled for level 'level'? 1677 """ 1678 if self.logger.manager.disable >= level: 1679 return False 1680 return level >= self.getEffectiveLevel() 1681 1682 def setLevel(self, level): 1683 """ 1684 Set the specified level on the underlying logger. 1685 """ 1686 self.logger.setLevel(level) 1687 1688 def getEffectiveLevel(self): 1689 """ 1690 Get the effective level for the underlying logger. 1691 """ 1692 return self.logger.getEffectiveLevel() 1693 1694 def hasHandlers(self): 1695 """ 1696 See if the underlying logger has any handlers. 1697 """ 1698 return self.logger.hasHandlers() 1699 1700 def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False): 1701 """ 1702 Low-level log implementation, proxied to allow nested logger adapters. 1703 """ 1704 return self.logger._log( 1705 level, 1706 msg, 1707 args, 1708 exc_info=exc_info, 1709 extra=extra, 1710 stack_info=stack_info, 1711 ) 1712 1713 @property 1714 def manager(self): 1715 return self.logger.manager 1716 1717 @manager.setter 1718 def set_manager(self, value): 1719 self.logger.manager = value 1720 1721 def __repr__(self): 1722 logger = self.logger 1723 level = getLevelName(logger.getEffectiveLevel()) 1724 return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level) 1725 1726 root = RootLogger(WARNING) 1727 Logger.root = root 1728 Logger.manager = Manager(Logger.root) 1729 1730 #--------------------------------------------------------------------------- 1731 # Configuration classes and functions 1732 #--------------------------------------------------------------------------- 1733 1734 def basicConfig(**kwargs): 1735 """ 1736 Do basic configuration for the logging system. 1737 1738 This function does nothing if the root logger already has handlers 1739 configured. It is a convenience method intended for use by simple scripts 1740 to do one-shot configuration of the logging package. 1741 1742 The default behaviour is to create a StreamHandler which writes to 1743 sys.stderr, set a formatter using the BASIC_FORMAT format string, and 1744 add the handler to the root logger. 1745 1746 A number of optional keyword arguments may be specified, which can alter 1747 the default behaviour. 1748 1749 filename Specifies that a FileHandler be created, using the specified 1750 filename, rather than a StreamHandler. 1751 filemode Specifies the mode to open the file, if filename is specified 1752 (if filemode is unspecified, it defaults to 'a'). 1753 format Use the specified format string for the handler. 1754 datefmt Use the specified date/time format. 1755 style If a format string is specified, use this to specify the 1756 type of format string (possible values '%', '{', '$', for 1757 %-formatting, :meth:`str.format` and :class:`string.Template` 1758 - defaults to '%'). 1759 level Set the root logger level to the specified level. 1760 stream Use the specified stream to initialize the StreamHandler. Note 1761 that this argument is incompatible with 'filename' - if both 1762 are present, 'stream' is ignored. 1763 handlers If specified, this should be an iterable of already created 1764 handlers, which will be added to the root handler. Any handler 1765 in the list which does not have a formatter assigned will be 1766 assigned the formatter created in this function. 1767 1768 Note that you could specify a stream created using open(filename, mode) 1769 rather than passing the filename and mode in. However, it should be 1770 remembered that StreamHandler does not close its stream (since it may be 1771 using sys.stdout or sys.stderr), whereas FileHandler closes its stream 1772 when the handler is closed. 1773 1774 .. versionchanged:: 3.2 1775 Added the ``style`` parameter. 1776 1777 .. versionchanged:: 3.3 1778 Added the ``handlers`` parameter. A ``ValueError`` is now thrown for 1779 incompatible arguments (e.g. ``handlers`` specified together with 1780 ``filename``/``filemode``, or ``filename``/``filemode`` specified 1781 together with ``stream``, or ``handlers`` specified together with 1782 ``stream``. 1783 """ 1784 # Add thread safety in case someone mistakenly calls 1785 # basicConfig() from multiple threads 1786 _acquireLock() 1787 try: 1788 if len(root.handlers) == 0: 1789 handlers = kwargs.pop("handlers", None) 1790 if handlers is None: 1791 if "stream" in kwargs and "filename" in kwargs: 1792 raise ValueError("'stream' and 'filename' should not be " 1793 "specified together") 1794 else: 1795 if "stream" in kwargs or "filename" in kwargs: 1796 raise ValueError("'stream' or 'filename' should not be " 1797 "specified together with 'handlers'") 1798 if handlers is None: 1799 filename = kwargs.pop("filename", None) 1800 mode = kwargs.pop("filemode", 'a') 1801 if filename: 1802 h = FileHandler(filename, mode) 1803 else: 1804 stream = kwargs.pop("stream", None) 1805 h = StreamHandler(stream) 1806 handlers = [h] 1807 dfs = kwargs.pop("datefmt", None) 1808 style = kwargs.pop("style", '%') 1809 if style not in _STYLES: 1810 raise ValueError('Style must be one of: %s' % ','.join( 1811 _STYLES.keys())) 1812 fs = kwargs.pop("format", _STYLES[style][1]) 1813 fmt = Formatter(fs, dfs, style) 1814 for h in handlers: 1815 if h.formatter is None: 1816 h.setFormatter(fmt) 1817 root.addHandler(h) 1818 level = kwargs.pop("level", None) 1819 if level is not None: 1820 root.setLevel(level) 1821 if kwargs: 1822 keys = ', '.join(kwargs.keys()) 1823 raise ValueError('Unrecognised argument(s): %s' % keys) 1824 finally: 1825 _releaseLock() 1826 1827 #--------------------------------------------------------------------------- 1828 # Utility functions at module level. 1829 # Basically delegate everything to the root logger. 1830 #--------------------------------------------------------------------------- 1831 1832 def getLogger(name=None): 1833 """ 1834 Return a logger with the specified name, creating it if necessary. 1835 1836 If no name is specified, return the root logger. 1837 """ 1838 if name: 1839 return Logger.manager.getLogger(name) 1840 else: 1841 return root 1842 1843 def critical(msg, *args, **kwargs): 1844 """ 1845 Log a message with severity 'CRITICAL' on the root logger. If the logger 1846 has no handlers, call basicConfig() to add a console handler with a 1847 pre-defined format. 1848 """ 1849 if len(root.handlers) == 0: 1850 basicConfig() 1851 root.critical(msg, *args, **kwargs) 1852 1853 fatal = critical 1854 1855 def error(msg, *args, **kwargs): 1856 """ 1857 Log a message with severity 'ERROR' on the root logger. If the logger has 1858 no handlers, call basicConfig() to add a console handler with a pre-defined 1859 format. 1860 """ 1861 if len(root.handlers) == 0: 1862 basicConfig() 1863 root.error(msg, *args, **kwargs) 1864 1865 def exception(msg, *args, exc_info=True, **kwargs): 1866 """ 1867 Log a message with severity 'ERROR' on the root logger, with exception 1868 information. If the logger has no handlers, basicConfig() is called to add 1869 a console handler with a pre-defined format. 1870 """ 1871 error(msg, *args, exc_info=exc_info, **kwargs) 1872 1873 def warning(msg, *args, **kwargs): 1874 """ 1875 Log a message with severity 'WARNING' on the root logger. If the logger has 1876 no handlers, call basicConfig() to add a console handler with a pre-defined 1877 format. 1878 """ 1879 if len(root.handlers) == 0: 1880 basicConfig() 1881 root.warning(msg, *args, **kwargs) 1882 1883 def warn(msg, *args, **kwargs): 1884 warnings.warn("The 'warn' function is deprecated, " 1885 "use 'warning' instead", DeprecationWarning, 2) 1886 warning(msg, *args, **kwargs) 1887 1888 def info(msg, *args, **kwargs): 1889 """ 1890 Log a message with severity 'INFO' on the root logger. If the logger has 1891 no handlers, call basicConfig() to add a console handler with a pre-defined 1892 format. 1893 """ 1894 if len(root.handlers) == 0: 1895 basicConfig() 1896 root.info(msg, *args, **kwargs) 1897 1898 def debug(msg, *args, **kwargs): 1899 """ 1900 Log a message with severity 'DEBUG' on the root logger. If the logger has 1901 no handlers, call basicConfig() to add a console handler with a pre-defined 1902 format. 1903 """ 1904 if len(root.handlers) == 0: 1905 basicConfig() 1906 root.debug(msg, *args, **kwargs) 1907 1908 def log(level, msg, *args, **kwargs): 1909 """ 1910 Log 'msg % args' with the integer severity 'level' on the root logger. If 1911 the logger has no handlers, call basicConfig() to add a console handler 1912 with a pre-defined format. 1913 """ 1914 if len(root.handlers) == 0: 1915 basicConfig() 1916 root.log(level, msg, *args, **kwargs) 1917 1918 def disable(level): 1919 """ 1920 Disable all logging calls of severity 'level' and below. 1921 """ 1922 root.manager.disable = level 1923 1924 def shutdown(handlerList=_handlerList): 1925 """ 1926 Perform any cleanup actions in the logging system (e.g. flushing 1927 buffers). 1928 1929 Should be called at application exit. 1930 """ 1931 for wr in reversed(handlerList[:]): 1932 #errors might occur, for example, if files are locked 1933 #we just ignore them if raiseExceptions is not set 1934 try: 1935 h = wr() 1936 if h: 1937 try: 1938 h.acquire() 1939 h.flush() 1940 h.close() 1941 except (OSError, ValueError): 1942 # Ignore errors which might be caused 1943 # because handlers have been closed but 1944 # references to them are still around at 1945 # application exit. 1946 pass 1947 finally: 1948 h.release() 1949 except: # ignore everything, as we're shutting down 1950 if raiseExceptions: 1951 raise 1952 #else, swallow 1953 1954 #Let's try and shutdown automatically on application exit... 1955 import atexit 1956 atexit.register(shutdown) 1957 1958 # Null handler 1959 1960 class NullHandler(Handler): 1961 """ 1962 This handler does nothing. It's intended to be used to avoid the 1963 "No handlers could be found for logger XXX" one-off warning. This is 1964 important for library code, which may contain code to log events. If a user 1965 of the library does not configure logging, the one-off warning might be 1966 produced; to avoid this, the library developer simply needs to instantiate 1967 a NullHandler and add it to the top-level logger of the library module or 1968 package. 1969 """ 1970 def handle(self, record): 1971 """Stub.""" 1972 1973 def emit(self, record): 1974 """Stub.""" 1975 1976 def createLock(self): 1977 self.lock = None 1978 1979 # Warnings integration 1980 1981 _warnings_showwarning = None 1982 1983 def _showwarning(message, category, filename, lineno, file=None, line=None): 1984 """ 1985 Implementation of showwarnings which redirects to logging, which will first 1986 check to see if the file parameter is None. If a file is specified, it will 1987 delegate to the original warnings implementation of showwarning. Otherwise, 1988 it will call warnings.formatwarning and will log the resulting string to a 1989 warnings logger named "py.warnings" with level logging.WARNING. 1990 """ 1991 if file is not None: 1992 if _warnings_showwarning is not None: 1993 _warnings_showwarning(message, category, filename, lineno, file, line) 1994 else: 1995 s = warnings.formatwarning(message, category, filename, lineno, line) 1996 logger = getLogger("py.warnings") 1997 if not logger.handlers: 1998 logger.addHandler(NullHandler()) 1999 logger.warning("%s", s) 2000 2001 def captureWarnings(capture): 2002 """ 2003 If capture is true, redirect all warnings to the logging package. 2004 If capture is False, ensure that warnings are not redirected to logging 2005 but to their original destinations. 2006 """ 2007 global _warnings_showwarning 2008 if capture: 2009 if _warnings_showwarning is None: 2010 _warnings_showwarning = warnings.showwarning 2011 warnings.showwarning = _showwarning 2012 else: 2013 if _warnings_showwarning is not None: 2014 warnings.showwarning = _warnings_showwarning 2015 _warnings_showwarning = None
1 # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved. 2 # 3 # Permission to use, copy, modify, and distribute this software and its 4 # documentation for any purpose and without fee is hereby granted, 5 # provided that the above copyright notice appear in all copies and that 6 # both that copyright notice and this permission notice appear in 7 # supporting documentation, and that the name of Vinay Sajip 8 # not be used in advertising or publicity pertaining to distribution 9 # of the software without specific, written prior permission. 10 # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING 11 # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL 12 # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR 13 # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER 14 # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 15 # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 17 """ 18 Logging package for Python. Based on PEP 282 and comments thereto in 19 comp.lang.python. 20 21 Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved. 22 23 To use, simply 'import logging' and log away! 24 """ 25 26 import sys, os, time, io, traceback, warnings, weakref, collections 27 28 from string import Template 29 30 __all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR', 31 'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO', 32 'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler', 33 'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig', 34 'captureWarnings', 'critical', 'debug', 'disable', 'error', 35 'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass', 36 'info', 'log', 'makeLogRecord', 'setLoggerClass', 'shutdown', 37 'warn', 'warning', 'getLogRecordFactory', 'setLogRecordFactory', 38 'lastResort', 'raiseExceptions'] 39 40 try: 41 import threading 42 except ImportError: #pragma: no cover 43 threading = None 44 45 __author__ = "Vinay Sajip <vinay_sajip@red-dove.com>" 46 __status__ = "production" 47 # The following module attributes are no longer updated. 48 __version__ = "0.5.1.2" 49 __date__ = "07 February 2010" 50 51 #--------------------------------------------------------------------------- 52 # Miscellaneous module data 53 #--------------------------------------------------------------------------- 54 55 # 56 #_startTime is used as the base when calculating the relative time of events 57 # 58 _startTime = time.time() 59 60 # 61 #raiseExceptions is used to see if exceptions during handling should be 62 #propagated 63 # 64 raiseExceptions = True 65 66 # 67 # If you don't want threading information in the log, set this to zero 68 # 69 logThreads = True 70 71 # 72 # If you don't want multiprocessing information in the log, set this to zero 73 # 74 logMultiprocessing = True 75 76 # 77 # If you don't want process information in the log, set this to zero 78 # 79 logProcesses = True 80 81 #--------------------------------------------------------------------------- 82 # Level related stuff 83 #--------------------------------------------------------------------------- 84 # 85 # Default levels and level names, these can be replaced with any positive set 86 # of values having corresponding names. There is a pseudo-level, NOTSET, which 87 # is only really there as a lower limit for user-defined levels. Handlers and 88 # loggers are initialized with NOTSET so that they will log all messages, even 89 # at user-defined levels. 90 # 91 92 CRITICAL = 50 93 FATAL = CRITICAL 94 ERROR = 40 95 WARNING = 30 96 WARN = WARNING 97 INFO = 20 98 DEBUG = 10 99 NOTSET = 0 100 101 _levelToName = { 102 CRITICAL: 'CRITICAL', 103 ERROR: 'ERROR', 104 WARNING: 'WARNING', 105 INFO: 'INFO', 106 DEBUG: 'DEBUG', 107 NOTSET: 'NOTSET', 108 } 109 _nameToLevel = { 110 'CRITICAL': CRITICAL, 111 'FATAL': FATAL, 112 'ERROR': ERROR, 113 'WARN': WARNING, 114 'WARNING': WARNING, 115 'INFO': INFO, 116 'DEBUG': DEBUG, 117 'NOTSET': NOTSET, 118 } 119 120 def getLevelName(level): 121 """ 122 Return the textual representation of logging level 'level'. 123 124 If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, 125 INFO, DEBUG) then you get the corresponding string. If you have 126 associated levels with names using addLevelName then the name you have 127 associated with 'level' is returned. 128 129 If a numeric value corresponding to one of the defined levels is passed 130 in, the corresponding string representation is returned. 131 132 Otherwise, the string "Level %s" % level is returned. 133 """ 134 # See Issues #22386, #27937 and #29220 for why it's this way 135 result = _levelToName.get(level) 136 if result is not None: 137 return result 138 result = _nameToLevel.get(level) 139 if result is not None: 140 return result 141 return "Level %s" % level 142 143 def addLevelName(level, levelName): 144 """ 145 Associate 'levelName' with 'level'. 146 147 This is used when converting levels to text during message formatting. 148 """ 149 _acquireLock() 150 try: #unlikely to cause an exception, but you never know... 151 _levelToName[level] = levelName 152 _nameToLevel[levelName] = level 153 finally: 154 _releaseLock() 155 156 if hasattr(sys, '_getframe'): 157 currentframe = lambda: sys._getframe(3) 158 else: #pragma: no cover 159 def currentframe(): 160 """Return the frame object for the caller's stack frame.""" 161 try: 162 raise Exception 163 except Exception: 164 return sys.exc_info()[2].tb_frame.f_back 165 166 # 167 # _srcfile is used when walking the stack to check when we've got the first 168 # caller stack frame, by skipping frames whose filename is that of this 169 # module's source. It therefore should contain the filename of this module's 170 # source file. 171 # 172 # Ordinarily we would use __file__ for this, but frozen modules don't always 173 # have __file__ set, for some reason (see Issue #21736). Thus, we get the 174 # filename from a handy code object from a function defined in this module. 175 # (There's no particular reason for picking addLevelName.) 176 # 177 178 _srcfile = os.path.normcase(addLevelName.__code__.co_filename) 179 180 # _srcfile is only used in conjunction with sys._getframe(). 181 # To provide compatibility with older versions of Python, set _srcfile 182 # to None if _getframe() is not available; this value will prevent 183 # findCaller() from being called. You can also do this if you want to avoid 184 # the overhead of fetching caller information, even when _getframe() is 185 # available. 186 #if not hasattr(sys, '_getframe'): 187 # _srcfile = None 188 189 190 def _checkLevel(level): 191 if isinstance(level, int): 192 rv = level 193 elif str(level) == level: 194 if level not in _nameToLevel: 195 raise ValueError("Unknown level: %r" % level) 196 rv = _nameToLevel[level] 197 else: 198 raise TypeError("Level not an integer or a valid string: %r" % level) 199 return rv 200 201 #--------------------------------------------------------------------------- 202 # Thread-related stuff 203 #--------------------------------------------------------------------------- 204 205 # 206 #_lock is used to serialize access to shared data structures in this module. 207 #This needs to be an RLock because fileConfig() creates and configures 208 #Handlers, and so might arbitrary user threads. Since Handler code updates the 209 #shared dictionary _handlers, it needs to acquire the lock. But if configuring, 210 #the lock would already have been acquired - so we need an RLock. 211 #The same argument applies to Loggers and Manager.loggerDict. 212 # 213 if threading: 214 _lock = threading.RLock() 215 else: #pragma: no cover 216 _lock = None 217 218 219 def _acquireLock(): 220 """ 221 Acquire the module-level lock for serializing access to shared data. 222 223 This should be released with _releaseLock(). 224 """ 225 if _lock: 226 _lock.acquire() 227 228 def _releaseLock(): 229 """ 230 Release the module-level lock acquired by calling _acquireLock(). 231 """ 232 if _lock: 233 _lock.release() 234 235 #--------------------------------------------------------------------------- 236 # The logging record 237 #--------------------------------------------------------------------------- 238 239 class LogRecord(object): 240 """ 241 A LogRecord instance represents an event being logged. 242 243 LogRecord instances are created every time something is logged. They 244 contain all the information pertinent to the event being logged. The 245 main information passed in is in msg and args, which are combined 246 using str(msg) % args to create the message field of the record. The 247 record also includes information such as when the record was created, 248 the source line where the logging call was made, and any exception 249 information to be logged. 250 """ 251 def __init__(self, name, level, pathname, lineno, 252 msg, args, exc_info, func=None, sinfo=None, **kwargs): 253 """ 254 Initialize a logging record with interesting information. 255 """ 256 ct = time.time() 257 self.name = name 258 self.msg = msg 259 # 260 # The following statement allows passing of a dictionary as a sole 261 # argument, so that you can do something like 262 # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) 263 # Suggested by Stefan Behnel. 264 # Note that without the test for args[0], we get a problem because 265 # during formatting, we test to see if the arg is present using 266 # 'if self.args:'. If the event being logged is e.g. 'Value is %d' 267 # and if the passed arg fails 'if self.args:' then no formatting 268 # is done. For example, logger.warning('Value is %d', 0) would log 269 # 'Value is %d' instead of 'Value is 0'. 270 # For the use case of passing a dictionary, this should not be a 271 # problem. 272 # Issue #21172: a request was made to relax the isinstance check 273 # to hasattr(args[0], '__getitem__'). However, the docs on string 274 # formatting still seem to suggest a mapping object is required. 275 # Thus, while not removing the isinstance check, it does now look 276 # for collections.Mapping rather than, as before, dict. 277 if (args and len(args) == 1 and isinstance(args[0], collections.Mapping) 278 and args[0]): 279 args = args[0] 280 self.args = args 281 self.levelname = getLevelName(level) 282 self.levelno = level 283 self.pathname = pathname 284 try: 285 self.filename = os.path.basename(pathname) 286 self.module = os.path.splitext(self.filename)[0] 287 except (TypeError, ValueError, AttributeError): 288 self.filename = pathname 289 self.module = "Unknown module" 290 self.exc_info = exc_info 291 self.exc_text = None # used to cache the traceback text 292 self.stack_info = sinfo 293 self.lineno = lineno 294 self.funcName = func 295 self.created = ct 296 self.msecs = (ct - int(ct)) * 1000 297 self.relativeCreated = (self.created - _startTime) * 1000 298 if logThreads and threading: 299 self.thread = threading.get_ident() 300 self.threadName = threading.current_thread().name 301 else: # pragma: no cover 302 self.thread = None 303 self.threadName = None 304 if not logMultiprocessing: # pragma: no cover 305 self.processName = None 306 else: 307 self.processName = 'MainProcess' 308 mp = sys.modules.get('multiprocessing') 309 if mp is not None: 310 # Errors may occur if multiprocessing has not finished loading 311 # yet - e.g. if a custom import hook causes third-party code 312 # to run when multiprocessing calls import. See issue 8200 313 # for an example 314 try: 315 self.processName = mp.current_process().name 316 except Exception: #pragma: no cover 317 pass 318 if logProcesses and hasattr(os, 'getpid'): 319 self.process = os.getpid() 320 else: 321 self.process = None 322 323 def __str__(self): 324 return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno, 325 self.pathname, self.lineno, self.msg) 326 327 __repr__ = __str__ 328 329 def getMessage(self): 330 """ 331 Return the message for this LogRecord. 332 333 Return the message for this LogRecord after merging any user-supplied 334 arguments with the message. 335 """ 336 msg = str(self.msg) 337 if self.args: 338 msg = msg % self.args 339 return msg 340 341 # 342 # Determine which class to use when instantiating log records. 343 # 344 _logRecordFactory = LogRecord 345 346 def setLogRecordFactory(factory): 347 """ 348 Set the factory to be used when instantiating a log record. 349 350 :param factory: A callable which will be called to instantiate 351 a log record. 352 """ 353 global _logRecordFactory 354 _logRecordFactory = factory 355 356 def getLogRecordFactory(): 357 """ 358 Return the factory to be used when instantiating a log record. 359 """ 360 361 return _logRecordFactory 362 363 def makeLogRecord(dict): 364 """ 365 Make a LogRecord whose attributes are defined by the specified dictionary, 366 This function is useful for converting a logging event received over 367 a socket connection (which is sent as a dictionary) into a LogRecord 368 instance. 369 """ 370 rv = _logRecordFactory(None, None, "", 0, "", (), None, None) 371 rv.__dict__.update(dict) 372 return rv 373 374 #--------------------------------------------------------------------------- 375 # Formatter classes and functions 376 #--------------------------------------------------------------------------- 377 378 class PercentStyle(object): 379 380 default_format = '%(message)s' 381 asctime_format = '%(asctime)s' 382 asctime_search = '%(asctime)' 383 384 def __init__(self, fmt): 385 self._fmt = fmt or self.default_format 386 387 def usesTime(self): 388 return self._fmt.find(self.asctime_search) >= 0 389 390 def format(self, record): 391 return self._fmt % record.__dict__ 392 393 class StrFormatStyle(PercentStyle): 394 default_format = '{message}' 395 asctime_format = '{asctime}' 396 asctime_search = '{asctime' 397 398 def format(self, record): 399 return self._fmt.format(**record.__dict__) 400 401 402 class StringTemplateStyle(PercentStyle): 403 default_format = '${message}' 404 asctime_format = '${asctime}' 405 asctime_search = '${asctime}' 406 407 def __init__(self, fmt): 408 self._fmt = fmt or self.default_format 409 self._tpl = Template(self._fmt) 410 411 def usesTime(self): 412 fmt = self._fmt 413 return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0 414 415 def format(self, record): 416 return self._tpl.substitute(**record.__dict__) 417 418 BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" 419 420 _STYLES = { 421 '%': (PercentStyle, BASIC_FORMAT), 422 '{': (StrFormatStyle, '{levelname}:{name}:{message}'), 423 '$': (StringTemplateStyle, '${levelname}:${name}:${message}'), 424 } 425 426 class Formatter(object): 427 """ 428 Formatter instances are used to convert a LogRecord to text. 429 430 Formatters need to know how a LogRecord is constructed. They are 431 responsible for converting a LogRecord to (usually) a string which can 432 be interpreted by either a human or an external system. The base Formatter 433 allows a formatting string to be specified. If none is supplied, the 434 default value of "%s(message)" is used. 435 436 The Formatter can be initialized with a format string which makes use of 437 knowledge of the LogRecord attributes - e.g. the default value mentioned 438 above makes use of the fact that the user's message and arguments are pre- 439 formatted into a LogRecord's message attribute. Currently, the useful 440 attributes in a LogRecord are described by: 441 442 %(name)s Name of the logger (logging channel) 443 %(levelno)s Numeric logging level for the message (DEBUG, INFO, 444 WARNING, ERROR, CRITICAL) 445 %(levelname)s Text logging level for the message ("DEBUG", "INFO", 446 "WARNING", "ERROR", "CRITICAL") 447 %(pathname)s Full pathname of the source file where the logging 448 call was issued (if available) 449 %(filename)s Filename portion of pathname 450 %(module)s Module (name portion of filename) 451 %(lineno)d Source line number where the logging call was issued 452 (if available) 453 %(funcName)s Function name 454 %(created)f Time when the LogRecord was created (time.time() 455 return value) 456 %(asctime)s Textual time when the LogRecord was created 457 %(msecs)d Millisecond portion of the creation time 458 %(relativeCreated)d Time in milliseconds when the LogRecord was created, 459 relative to the time the logging module was loaded 460 (typically at application startup time) 461 %(thread)d Thread ID (if available) 462 %(threadName)s Thread name (if available) 463 %(process)d Process ID (if available) 464 %(message)s The result of record.getMessage(), computed just as 465 the record is emitted 466 """ 467 468 converter = time.localtime 469 470 def __init__(self, fmt=None, datefmt=None, style='%'): 471 """ 472 Initialize the formatter with specified format strings. 473 474 Initialize the formatter either with the specified format string, or a 475 default as described above. Allow for specialized date formatting with 476 the optional datefmt argument (if omitted, you get the ISO8601 format). 477 478 Use a style parameter of '%', '{' or '$' to specify that you want to 479 use one of %-formatting, :meth:`str.format` (``{}``) formatting or 480 :class:`string.Template` formatting in your format string. 481 482 .. versionchanged:: 3.2 483 Added the ``style`` parameter. 484 """ 485 if style not in _STYLES: 486 raise ValueError('Style must be one of: %s' % ','.join( 487 _STYLES.keys())) 488 self._style = _STYLES[style][0](fmt) 489 self._fmt = self._style._fmt 490 self.datefmt = datefmt 491 492 default_time_format = '%Y-%m-%d %H:%M:%S' 493 default_msec_format = '%s,%03d' 494 495 def formatTime(self, record, datefmt=None): 496 """ 497 Return the creation time of the specified LogRecord as formatted text. 498 499 This method should be called from format() by a formatter which 500 wants to make use of a formatted time. This method can be overridden 501 in formatters to provide for any specific requirement, but the 502 basic behaviour is as follows: if datefmt (a string) is specified, 503 it is used with time.strftime() to format the creation time of the 504 record. Otherwise, the ISO8601 format is used. The resulting 505 string is returned. This function uses a user-configurable function 506 to convert the creation time to a tuple. By default, time.localtime() 507 is used; to change this for a particular formatter instance, set the 508 'converter' attribute to a function with the same signature as 509 time.localtime() or time.gmtime(). To change it for all formatters, 510 for example if you want all logging times to be shown in GMT, 511 set the 'converter' attribute in the Formatter class. 512 """ 513 ct = self.converter(record.created) 514 if datefmt: 515 s = time.strftime(datefmt, ct) 516 else: 517 t = time.strftime(self.default_time_format, ct) 518 s = self.default_msec_format % (t, record.msecs) 519 return s 520 521 def formatException(self, ei): 522 """ 523 Format and return the specified exception information as a string. 524 525 This default implementation just uses 526 traceback.print_exception() 527 """ 528 sio = io.StringIO() 529 tb = ei[2] 530 # See issues #9427, #1553375. Commented out for now. 531 #if getattr(self, 'fullstack', False): 532 # traceback.print_stack(tb.tb_frame.f_back, file=sio) 533 traceback.print_exception(ei[0], ei[1], tb, None, sio) 534 s = sio.getvalue() 535 sio.close() 536 if s[-1:] == "\n": 537 s = s[:-1] 538 return s 539 540 def usesTime(self): 541 """ 542 Check if the format uses the creation time of the record. 543 """ 544 return self._style.usesTime() 545 546 def formatMessage(self, record): 547 return self._style.format(record) 548 549 def formatStack(self, stack_info): 550 """ 551 This method is provided as an extension point for specialized 552 formatting of stack information. 553 554 The input data is a string as returned from a call to 555 :func:`traceback.print_stack`, but with the last trailing newline 556 removed. 557 558 The base implementation just returns the value passed in. 559 """ 560 return stack_info 561 562 def format(self, record): 563 """ 564 Format the specified record as text. 565 566 The record's attribute dictionary is used as the operand to a 567 string formatting operation which yields the returned string. 568 Before formatting the dictionary, a couple of preparatory steps 569 are carried out. The message attribute of the record is computed 570 using LogRecord.getMessage(). If the formatting string uses the 571 time (as determined by a call to usesTime(), formatTime() is 572 called to format the event time. If there is exception information, 573 it is formatted using formatException() and appended to the message. 574 """ 575 record.message = record.getMessage() 576 if self.usesTime(): 577 record.asctime = self.formatTime(record, self.datefmt) 578 s = self.formatMessage(record) 579 if record.exc_info: 580 # Cache the traceback text to avoid converting it multiple times 581 # (it's constant anyway) 582 if not record.exc_text: 583 record.exc_text = self.formatException(record.exc_info) 584 if record.exc_text: 585 if s[-1:] != "\n": 586 s = s + "\n" 587 s = s + record.exc_text 588 if record.stack_info: 589 if s[-1:] != "\n": 590 s = s + "\n" 591 s = s + self.formatStack(record.stack_info) 592 return s 593 594 # 595 # The default formatter to use when no other is specified 596 # 597 _defaultFormatter = Formatter() 598 599 class BufferingFormatter(object): 600 """ 601 A formatter suitable for formatting a number of records. 602 """ 603 def __init__(self, linefmt=None): 604 """ 605 Optionally specify a formatter which will be used to format each 606 individual record. 607 """ 608 if linefmt: 609 self.linefmt = linefmt 610 else: 611 self.linefmt = _defaultFormatter 612 613 def formatHeader(self, records): 614 """ 615 Return the header string for the specified records. 616 """ 617 return "" 618 619 def formatFooter(self, records): 620 """ 621 Return the footer string for the specified records. 622 """ 623 return "" 624 625 def format(self, records): 626 """ 627 Format the specified records and return the result as a string. 628 """ 629 rv = "" 630 if len(records) > 0: 631 rv = rv + self.formatHeader(records) 632 for record in records: 633 rv = rv + self.linefmt.format(record) 634 rv = rv + self.formatFooter(records) 635 return rv 636 637 #--------------------------------------------------------------------------- 638 # Filter classes and functions 639 #--------------------------------------------------------------------------- 640 641 class Filter(object): 642 """ 643 Filter instances are used to perform arbitrary filtering of LogRecords. 644 645 Loggers and Handlers can optionally use Filter instances to filter 646 records as desired. The base filter class only allows events which are 647 below a certain point in the logger hierarchy. For example, a filter 648 initialized with "A.B" will allow events logged by loggers "A.B", 649 "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If 650 initialized with the empty string, all events are passed. 651 """ 652 def __init__(self, name=''): 653 """ 654 Initialize a filter. 655 656 Initialize with the name of the logger which, together with its 657 children, will have its events allowed through the filter. If no 658 name is specified, allow every event. 659 """ 660 self.name = name 661 self.nlen = len(name) 662 663 def filter(self, record): 664 """ 665 Determine if the specified record is to be logged. 666 667 Is the specified record to be logged? Returns 0 for no, nonzero for 668 yes. If deemed appropriate, the record may be modified in-place. 669 """ 670 if self.nlen == 0: 671 return True 672 elif self.name == record.name: 673 return True 674 elif record.name.find(self.name, 0, self.nlen) != 0: 675 return False 676 return (record.name[self.nlen] == ".") 677 678 class Filterer(object): 679 """ 680 A base class for loggers and handlers which allows them to share 681 common code. 682 """ 683 def __init__(self): 684 """ 685 Initialize the list of filters to be an empty list. 686 """ 687 self.filters = [] 688 689 def addFilter(self, filter): 690 """ 691 Add the specified filter to this handler. 692 """ 693 if not (filter in self.filters): 694 self.filters.append(filter) 695 696 def removeFilter(self, filter): 697 """ 698 Remove the specified filter from this handler. 699 """ 700 if filter in self.filters: 701 self.filters.remove(filter) 702 703 def filter(self, record): 704 """ 705 Determine if a record is loggable by consulting all the filters. 706 707 The default is to allow the record to be logged; any filter can veto 708 this and the record is then dropped. Returns a zero value if a record 709 is to be dropped, else non-zero. 710 711 .. versionchanged:: 3.2 712 713 Allow filters to be just callables. 714 """ 715 rv = True 716 for f in self.filters: 717 if hasattr(f, 'filter'): 718 result = f.filter(record) 719 else: 720 result = f(record) # assume callable - will raise if not 721 if not result: 722 rv = False 723 break 724 return rv 725 726 #--------------------------------------------------------------------------- 727 # Handler classes and functions 728 #--------------------------------------------------------------------------- 729 730 _handlers = weakref.WeakValueDictionary() #map of handler names to handlers 731 _handlerList = [] # added to allow handlers to be removed in reverse of order initialized 732 733 def _removeHandlerRef(wr): 734 """ 735 Remove a handler reference from the internal cleanup list. 736 """ 737 # This function can be called during module teardown, when globals are 738 # set to None. It can also be called from another thread. So we need to 739 # pre-emptively grab the necessary globals and check if they're None, 740 # to prevent race conditions and failures during interpreter shutdown. 741 acquire, release, handlers = _acquireLock, _releaseLock, _handlerList 742 if acquire and release and handlers: 743 acquire() 744 try: 745 if wr in handlers: 746 handlers.remove(wr) 747 finally: 748 release() 749 750 def _addHandlerRef(handler): 751 """ 752 Add a handler to the internal cleanup list using a weak reference. 753 """ 754 _acquireLock() 755 try: 756 _handlerList.append(weakref.ref(handler, _removeHandlerRef)) 757 finally: 758 _releaseLock() 759 760 class Handler(Filterer): 761 """ 762 Handler instances dispatch logging events to specific destinations. 763 764 The base handler class. Acts as a placeholder which defines the Handler 765 interface. Handlers can optionally use Formatter instances to format 766 records as desired. By default, no formatter is specified; in this case, 767 the 'raw' message as determined by record.message is logged. 768 """ 769 def __init__(self, level=NOTSET): 770 """ 771 Initializes the instance - basically setting the formatter to None 772 and the filter list to empty. 773 """ 774 Filterer.__init__(self) 775 self._name = None 776 self.level = _checkLevel(level) 777 self.formatter = None 778 # Add the handler to the global _handlerList (for cleanup on shutdown) 779 _addHandlerRef(self) 780 self.createLock() 781 782 def get_name(self): 783 return self._name 784 785 def set_name(self, name): 786 _acquireLock() 787 try: 788 if self._name in _handlers: 789 del _handlers[self._name] 790 self._name = name 791 if name: 792 _handlers[name] = self 793 finally: 794 _releaseLock() 795 796 name = property(get_name, set_name) 797 798 def createLock(self): 799 """ 800 Acquire a thread lock for serializing access to the underlying I/O. 801 """ 802 if threading: 803 self.lock = threading.RLock() 804 else: #pragma: no cover 805 self.lock = None 806 807 def acquire(self): 808 """ 809 Acquire the I/O thread lock. 810 """ 811 if self.lock: 812 self.lock.acquire() 813 814 def release(self): 815 """ 816 Release the I/O thread lock. 817 """ 818 if self.lock: 819 self.lock.release() 820 821 def setLevel(self, level): 822 """ 823 Set the logging level of this handler. level must be an int or a str. 824 """ 825 self.level = _checkLevel(level) 826 827 def format(self, record): 828 """ 829 Format the specified record. 830 831 If a formatter is set, use it. Otherwise, use the default formatter 832 for the module. 833 """ 834 if self.formatter: 835 fmt = self.formatter 836 else: 837 fmt = _defaultFormatter 838 return fmt.format(record) 839 840 def emit(self, record): 841 """ 842 Do whatever it takes to actually log the specified logging record. 843 844 This version is intended to be implemented by subclasses and so 845 raises a NotImplementedError. 846 """ 847 raise NotImplementedError('emit must be implemented ' 848 'by Handler subclasses') 849 850 def handle(self, record): 851 """ 852 Conditionally emit the specified logging record. 853 854 Emission depends on filters which may have been added to the handler. 855 Wrap the actual emission of the record with acquisition/release of 856 the I/O thread lock. Returns whether the filter passed the record for 857 emission. 858 """ 859 rv = self.filter(record) 860 if rv: 861 self.acquire() 862 try: 863 self.emit(record) 864 finally: 865 self.release() 866 return rv 867 868 def setFormatter(self, fmt): 869 """ 870 Set the formatter for this handler. 871 """ 872 self.formatter = fmt 873 874 def flush(self): 875 """ 876 Ensure all logging output has been flushed. 877 878 This version does nothing and is intended to be implemented by 879 subclasses. 880 """ 881 pass 882 883 def close(self): 884 """ 885 Tidy up any resources used by the handler. 886 887 This version removes the handler from an internal map of handlers, 888 _handlers, which is used for handler lookup by name. Subclasses 889 should ensure that this gets called from overridden close() 890 methods. 891 """ 892 #get the module data lock, as we're updating a shared structure. 893 _acquireLock() 894 try: #unlikely to raise an exception, but you never know... 895 if self._name and self._name in _handlers: 896 del _handlers[self._name] 897 finally: 898 _releaseLock() 899 900 def handleError(self, record): 901 """ 902 Handle errors which occur during an emit() call. 903 904 This method should be called from handlers when an exception is 905 encountered during an emit() call. If raiseExceptions is false, 906 exceptions get silently ignored. This is what is mostly wanted 907 for a logging system - most users will not care about errors in 908 the logging system, they are more interested in application errors. 909 You could, however, replace this with a custom handler if you wish. 910 The record which was being processed is passed in to this method. 911 """ 912 if raiseExceptions and sys.stderr: # see issue 13807 913 t, v, tb = sys.exc_info() 914 try: 915 sys.stderr.write('--- Logging error ---\n') 916 traceback.print_exception(t, v, tb, None, sys.stderr) 917 sys.stderr.write('Call stack:\n') 918 # Walk the stack frame up until we're out of logging, 919 # so as to print the calling context. 920 frame = tb.tb_frame 921 while (frame and os.path.dirname(frame.f_code.co_filename) == 922 __path__[0]): 923 frame = frame.f_back 924 if frame: 925 traceback.print_stack(frame, file=sys.stderr) 926 else: 927 # couldn't find the right stack frame, for some reason 928 sys.stderr.write('Logged from file %s, line %s\n' % ( 929 record.filename, record.lineno)) 930 # Issue 18671: output logging message and arguments 931 try: 932 sys.stderr.write('Message: %r\n' 933 'Arguments: %s\n' % (record.msg, 934 record.args)) 935 except Exception: 936 sys.stderr.write('Unable to print the message and arguments' 937 ' - possible formatting error.\nUse the' 938 ' traceback above to help find the error.\n' 939 ) 940 except OSError: #pragma: no cover 941 pass # see issue 5971 942 finally: 943 del t, v, tb 944 945 def __repr__(self): 946 level = getLevelName(self.level) 947 return '<%s (%s)>' % (self.__class__.__name__, level) 948 949 class StreamHandler(Handler): 950 """ 951 A handler class which writes logging records, appropriately formatted, 952 to a stream. Note that this class does not close the stream, as 953 sys.stdout or sys.stderr may be used. 954 """ 955 956 terminator = '\n' 957 958 def __init__(self, stream=None): 959 """ 960 Initialize the handler. 961 962 If stream is not specified, sys.stderr is used. 963 """ 964 Handler.__init__(self) 965 if stream is None: 966 stream = sys.stderr 967 self.stream = stream 968 969 def flush(self): 970 """ 971 Flushes the stream. 972 """ 973 self.acquire() 974 try: 975 if self.stream and hasattr(self.stream, "flush"): 976 self.stream.flush() 977 finally: 978 self.release() 979 980 def emit(self, record): 981 """ 982 Emit a record. 983 984 If a formatter is specified, it is used to format the record. 985 The record is then written to the stream with a trailing newline. If 986 exception information is present, it is formatted using 987 traceback.print_exception and appended to the stream. If the stream 988 has an 'encoding' attribute, it is used to determine how to do the 989 output to the stream. 990 """ 991 try: 992 msg = self.format(record) 993 stream = self.stream 994 stream.write(msg) 995 stream.write(self.terminator) 996 self.flush() 997 except Exception: 998 self.handleError(record) 999 1000 def __repr__(self): 1001 level = getLevelName(self.level) 1002 name = getattr(self.stream, 'name', '') 1003 if name: 1004 name += ' ' 1005 return '<%s %s(%s)>' % (self.__class__.__name__, name, level) 1006 1007 1008 class FileHandler(StreamHandler): 1009 """ 1010 A handler class which writes formatted logging records to disk files. 1011 """ 1012 def __init__(self, filename, mode='a', encoding=None, delay=False): 1013 """ 1014 Open the specified file and use it as the stream for logging. 1015 """ 1016 # Issue #27493: add support for Path objects to be passed in 1017 filename = os.fspath(filename) 1018 #keep the absolute path, otherwise derived classes which use this 1019 #may come a cropper when the current directory changes 1020 self.baseFilename = os.path.abspath(filename) 1021 self.mode = mode 1022 self.encoding = encoding 1023 self.delay = delay 1024 if delay: 1025 #We don't open the stream, but we still need to call the 1026 #Handler constructor to set level, formatter, lock etc. 1027 Handler.__init__(self) 1028 self.stream = None 1029 else: 1030 StreamHandler.__init__(self, self._open()) 1031 1032 def close(self): 1033 """ 1034 Closes the stream. 1035 """ 1036 self.acquire() 1037 try: 1038 try: 1039 if self.stream: 1040 try: 1041 self.flush() 1042 finally: 1043 stream = self.stream 1044 self.stream = None 1045 if hasattr(stream, "close"): 1046 stream.close() 1047 finally: 1048 # Issue #19523: call unconditionally to 1049 # prevent a handler leak when delay is set 1050 StreamHandler.close(self) 1051 finally: 1052 self.release() 1053 1054 def _open(self): 1055 """ 1056 Open the current base file with the (original) mode and encoding. 1057 Return the resulting stream. 1058 """ 1059 return open(self.baseFilename, self.mode, encoding=self.encoding) 1060 1061 def emit(self, record): 1062 """ 1063 Emit a record. 1064 1065 If the stream was not opened because 'delay' was specified in the 1066 constructor, open it before calling the superclass's emit. 1067 """ 1068 if self.stream is None: 1069 self.stream = self._open() 1070 StreamHandler.emit(self, record) 1071 1072 def __repr__(self): 1073 level = getLevelName(self.level) 1074 return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level) 1075 1076 1077 class _StderrHandler(StreamHandler): 1078 """ 1079 This class is like a StreamHandler using sys.stderr, but always uses 1080 whatever sys.stderr is currently set to rather than the value of 1081 sys.stderr at handler construction time. 1082 """ 1083 def __init__(self, level=NOTSET): 1084 """ 1085 Initialize the handler. 1086 """ 1087 Handler.__init__(self, level) 1088 1089 @property 1090 def stream(self): 1091 return sys.stderr 1092 1093 1094 _defaultLastResort = _StderrHandler(WARNING) 1095 lastResort = _defaultLastResort 1096 1097 #--------------------------------------------------------------------------- 1098 # Manager classes and functions 1099 #--------------------------------------------------------------------------- 1100 1101 class PlaceHolder(object): 1102 """ 1103 PlaceHolder instances are used in the Manager logger hierarchy to take 1104 the place of nodes for which no loggers have been defined. This class is 1105 intended for internal use only and not as part of the public API. 1106 """ 1107 def __init__(self, alogger): 1108 """ 1109 Initialize with the specified logger being a child of this placeholder. 1110 """ 1111 self.loggerMap = { alogger : None } 1112 1113 def append(self, alogger): 1114 """ 1115 Add the specified logger as a child of this placeholder. 1116 """ 1117 if alogger not in self.loggerMap: 1118 self.loggerMap[alogger] = None 1119 1120 # 1121 # Determine which class to use when instantiating loggers. 1122 # 1123 1124 def setLoggerClass(klass): 1125 """ 1126 Set the class to be used when instantiating a logger. The class should 1127 define __init__() such that only a name argument is required, and the 1128 __init__() should call Logger.__init__() 1129 """ 1130 if klass != Logger: 1131 if not issubclass(klass, Logger): 1132 raise TypeError("logger not derived from logging.Logger: " 1133 + klass.__name__) 1134 global _loggerClass 1135 _loggerClass = klass 1136 1137 def getLoggerClass(): 1138 """ 1139 Return the class to be used when instantiating a logger. 1140 """ 1141 return _loggerClass 1142 1143 class Manager(object): 1144 """ 1145 There is [under normal circumstances] just one Manager instance, which 1146 holds the hierarchy of loggers. 1147 """ 1148 def __init__(self, rootnode): 1149 """ 1150 Initialize the manager with the root node of the logger hierarchy. 1151 """ 1152 self.root = rootnode 1153 self.disable = 0 1154 self.emittedNoHandlerWarning = False 1155 self.loggerDict = {} 1156 self.loggerClass = None 1157 self.logRecordFactory = None 1158 1159 def getLogger(self, name): 1160 """ 1161 Get a logger with the specified name (channel name), creating it 1162 if it doesn't yet exist. This name is a dot-separated hierarchical 1163 name, such as "a", "a.b", "a.b.c" or similar. 1164 1165 If a PlaceHolder existed for the specified name [i.e. the logger 1166 didn't exist but a child of it did], replace it with the created 1167 logger and fix up the parent/child references which pointed to the 1168 placeholder to now point to the logger. 1169 """ 1170 rv = None 1171 if not isinstance(name, str): 1172 raise TypeError('A logger name must be a string') 1173 _acquireLock() 1174 try: 1175 if name in self.loggerDict: 1176 rv = self.loggerDict[name] 1177 if isinstance(rv, PlaceHolder): 1178 ph = rv 1179 rv = (self.loggerClass or _loggerClass)(name) 1180 rv.manager = self 1181 self.loggerDict[name] = rv 1182 self._fixupChildren(ph, rv) 1183 self._fixupParents(rv) 1184 else: 1185 rv = (self.loggerClass or _loggerClass)(name) 1186 rv.manager = self 1187 self.loggerDict[name] = rv 1188 self._fixupParents(rv) 1189 finally: 1190 _releaseLock() 1191 return rv 1192 1193 def setLoggerClass(self, klass): 1194 """ 1195 Set the class to be used when instantiating a logger with this Manager. 1196 """ 1197 if klass != Logger: 1198 if not issubclass(klass, Logger): 1199 raise TypeError("logger not derived from logging.Logger: " 1200 + klass.__name__) 1201 self.loggerClass = klass 1202 1203 def setLogRecordFactory(self, factory): 1204 """ 1205 Set the factory to be used when instantiating a log record with this 1206 Manager. 1207 """ 1208 self.logRecordFactory = factory 1209 1210 def _fixupParents(self, alogger): 1211 """ 1212 Ensure that there are either loggers or placeholders all the way 1213 from the specified logger to the root of the logger hierarchy. 1214 """ 1215 name = alogger.name 1216 i = name.rfind(".") 1217 rv = None 1218 while (i > 0) and not rv: 1219 substr = name[:i] 1220 if substr not in self.loggerDict: 1221 self.loggerDict[substr] = PlaceHolder(alogger) 1222 else: 1223 obj = self.loggerDict[substr] 1224 if isinstance(obj, Logger): 1225 rv = obj 1226 else: 1227 assert isinstance(obj, PlaceHolder) 1228 obj.append(alogger) 1229 i = name.rfind(".", 0, i - 1) 1230 if not rv: 1231 rv = self.root 1232 alogger.parent = rv 1233 1234 def _fixupChildren(self, ph, alogger): 1235 """ 1236 Ensure that children of the placeholder ph are connected to the 1237 specified logger. 1238 """ 1239 name = alogger.name 1240 namelen = len(name) 1241 for c in ph.loggerMap.keys(): 1242 #The if means ... if not c.parent.name.startswith(nm) 1243 if c.parent.name[:namelen] != name: 1244 alogger.parent = c.parent 1245 c.parent = alogger 1246 1247 #--------------------------------------------------------------------------- 1248 # Logger classes and functions 1249 #--------------------------------------------------------------------------- 1250 1251 class Logger(Filterer): 1252 """ 1253 Instances of the Logger class represent a single logging channel. A 1254 "logging channel" indicates an area of an application. Exactly how an 1255 "area" is defined is up to the application developer. Since an 1256 application can have any number of areas, logging channels are identified 1257 by a unique string. Application areas can be nested (e.g. an area 1258 of "input processing" might include sub-areas "read CSV files", "read 1259 XLS files" and "read Gnumeric files"). To cater for this natural nesting, 1260 channel names are organized into a namespace hierarchy where levels are 1261 separated by periods, much like the Java or Python package namespace. So 1262 in the instance given above, channel names might be "input" for the upper 1263 level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. 1264 There is no arbitrary limit to the depth of nesting. 1265 """ 1266 def __init__(self, name, level=NOTSET): 1267 """ 1268 Initialize the logger with a name and an optional level. 1269 """ 1270 Filterer.__init__(self) 1271 self.name = name 1272 self.level = _checkLevel(level) 1273 self.parent = None 1274 self.propagate = True 1275 self.handlers = [] 1276 self.disabled = False 1277 1278 def setLevel(self, level): 1279 """ 1280 Set the logging level of this logger. level must be an int or a str. 1281 """ 1282 self.level = _checkLevel(level) 1283 1284 def debug(self, msg, *args, **kwargs): 1285 """ 1286 Log 'msg % args' with severity 'DEBUG'. 1287 1288 To pass exception information, use the keyword argument exc_info with 1289 a true value, e.g. 1290 1291 logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) 1292 """ 1293 if self.isEnabledFor(DEBUG): 1294 self._log(DEBUG, msg, args, **kwargs) 1295 1296 def info(self, msg, *args, **kwargs): 1297 """ 1298 Log 'msg % args' with severity 'INFO'. 1299 1300 To pass exception information, use the keyword argument exc_info with 1301 a true value, e.g. 1302 1303 logger.info("Houston, we have a %s", "interesting problem", exc_info=1) 1304 """ 1305 if self.isEnabledFor(INFO): 1306 self._log(INFO, msg, args, **kwargs) 1307 1308 def warning(self, msg, *args, **kwargs): 1309 """ 1310 Log 'msg % args' with severity 'WARNING'. 1311 1312 To pass exception information, use the keyword argument exc_info with 1313 a true value, e.g. 1314 1315 logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) 1316 """ 1317 if self.isEnabledFor(WARNING): 1318 self._log(WARNING, msg, args, **kwargs) 1319 1320 def warn(self, msg, *args, **kwargs): 1321 warnings.warn("The 'warn' method is deprecated, " 1322 "use 'warning' instead", DeprecationWarning, 2) 1323 self.warning(msg, *args, **kwargs) 1324 1325 def error(self, msg, *args, **kwargs): 1326 """ 1327 Log 'msg % args' with severity 'ERROR'. 1328 1329 To pass exception information, use the keyword argument exc_info with 1330 a true value, e.g. 1331 1332 logger.error("Houston, we have a %s", "major problem", exc_info=1) 1333 """ 1334 if self.isEnabledFor(ERROR): 1335 self._log(ERROR, msg, args, **kwargs) 1336 1337 def exception(self, msg, *args, exc_info=True, **kwargs): 1338 """ 1339 Convenience method for logging an ERROR with exception information. 1340 """ 1341 self.error(msg, *args, exc_info=exc_info, **kwargs) 1342 1343 def critical(self, msg, *args, **kwargs): 1344 """ 1345 Log 'msg % args' with severity 'CRITICAL'. 1346 1347 To pass exception information, use the keyword argument exc_info with 1348 a true value, e.g. 1349 1350 logger.critical("Houston, we have a %s", "major disaster", exc_info=1) 1351 """ 1352 if self.isEnabledFor(CRITICAL): 1353 self._log(CRITICAL, msg, args, **kwargs) 1354 1355 fatal = critical 1356 1357 def log(self, level, msg, *args, **kwargs): 1358 """ 1359 Log 'msg % args' with the integer severity 'level'. 1360 1361 To pass exception information, use the keyword argument exc_info with 1362 a true value, e.g. 1363 1364 logger.log(level, "We have a %s", "mysterious problem", exc_info=1) 1365 """ 1366 if not isinstance(level, int): 1367 if raiseExceptions: 1368 raise TypeError("level must be an integer") 1369 else: 1370 return 1371 if self.isEnabledFor(level): 1372 self._log(level, msg, args, **kwargs) 1373 1374 def findCaller(self, stack_info=False): 1375 """ 1376 Find the stack frame of the caller so that we can note the source 1377 file name, line number and function name. 1378 """ 1379 f = currentframe() 1380 #On some versions of IronPython, currentframe() returns None if 1381 #IronPython isn't run with -X:Frames. 1382 if f is not None: 1383 f = f.f_back 1384 rv = "(unknown file)", 0, "(unknown function)", None 1385 while hasattr(f, "f_code"): 1386 co = f.f_code 1387 filename = os.path.normcase(co.co_filename) 1388 if filename == _srcfile: 1389 f = f.f_back 1390 continue 1391 sinfo = None 1392 if stack_info: 1393 sio = io.StringIO() 1394 sio.write('Stack (most recent call last):\n') 1395 traceback.print_stack(f, file=sio) 1396 sinfo = sio.getvalue() 1397 if sinfo[-1] == '\n': 1398 sinfo = sinfo[:-1] 1399 sio.close() 1400 rv = (co.co_filename, f.f_lineno, co.co_name, sinfo) 1401 break 1402 return rv 1403 1404 def makeRecord(self, name, level, fn, lno, msg, args, exc_info, 1405 func=None, extra=None, sinfo=None): 1406 """ 1407 A factory method which can be overridden in subclasses to create 1408 specialized LogRecords. 1409 """ 1410 rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func, 1411 sinfo) 1412 if extra is not None: 1413 for key in extra: 1414 if (key in ["message", "asctime"]) or (key in rv.__dict__): 1415 raise KeyError("Attempt to overwrite %r in LogRecord" % key) 1416 rv.__dict__[key] = extra[key] 1417 return rv 1418 1419 def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False): 1420 """ 1421 Low-level logging routine which creates a LogRecord and then calls 1422 all the handlers of this logger to handle the record. 1423 """ 1424 sinfo = None 1425 if _srcfile: 1426 #IronPython doesn't track Python frames, so findCaller raises an 1427 #exception on some versions of IronPython. We trap it here so that 1428 #IronPython can use logging. 1429 try: 1430 fn, lno, func, sinfo = self.findCaller(stack_info) 1431 except ValueError: # pragma: no cover 1432 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1433 else: # pragma: no cover 1434 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1435 if exc_info: 1436 if isinstance(exc_info, BaseException): 1437 exc_info = (type(exc_info), exc_info, exc_info.__traceback__) 1438 elif not isinstance(exc_info, tuple): 1439 exc_info = sys.exc_info() 1440 record = self.makeRecord(self.name, level, fn, lno, msg, args, 1441 exc_info, func, extra, sinfo) 1442 self.handle(record) 1443 1444 def handle(self, record): 1445 """ 1446 Call the handlers for the specified record. 1447 1448 This method is used for unpickled records received from a socket, as 1449 well as those created locally. Logger-level filtering is applied. 1450 """ 1451 if (not self.disabled) and self.filter(record): 1452 self.callHandlers(record) 1453 1454 def addHandler(self, hdlr): 1455 """ 1456 Add the specified handler to this logger. 1457 """ 1458 _acquireLock() 1459 try: 1460 if not (hdlr in self.handlers): 1461 self.handlers.append(hdlr) 1462 finally: 1463 _releaseLock() 1464 1465 def removeHandler(self, hdlr): 1466 """ 1467 Remove the specified handler from this logger. 1468 """ 1469 _acquireLock() 1470 try: 1471 if hdlr in self.handlers: 1472 self.handlers.remove(hdlr) 1473 finally: 1474 _releaseLock() 1475 1476 def hasHandlers(self): 1477 """ 1478 See if this logger has any handlers configured. 1479 1480 Loop through all handlers for this logger and its parents in the 1481 logger hierarchy. Return True if a handler was found, else False. 1482 Stop searching up the hierarchy whenever a logger with the "propagate" 1483 attribute set to zero is found - that will be the last logger which 1484 is checked for the existence of handlers. 1485 """ 1486 c = self 1487 rv = False 1488 while c: 1489 if c.handlers: 1490 rv = True 1491 break 1492 if not c.propagate: 1493 break 1494 else: 1495 c = c.parent 1496 return rv 1497 1498 def callHandlers(self, record): 1499 """ 1500 Pass a record to all relevant handlers. 1501 1502 Loop through all handlers for this logger and its parents in the 1503 logger hierarchy. If no handler was found, output a one-off error 1504 message to sys.stderr. Stop searching up the hierarchy whenever a 1505 logger with the "propagate" attribute set to zero is found - that 1506 will be the last logger whose handlers are called. 1507 """ 1508 c = self 1509 found = 0 1510 while c: 1511 for hdlr in c.handlers: 1512 found = found + 1 1513 if record.levelno >= hdlr.level: 1514 hdlr.handle(record) 1515 if not c.propagate: 1516 c = None #break out 1517 else: 1518 c = c.parent 1519 if (found == 0): 1520 if lastResort: 1521 if record.levelno >= lastResort.level: 1522 lastResort.handle(record) 1523 elif raiseExceptions and not self.manager.emittedNoHandlerWarning: 1524 sys.stderr.write("No handlers could be found for logger" 1525 " \"%s\"\n" % self.name) 1526 self.manager.emittedNoHandlerWarning = True 1527 1528 def getEffectiveLevel(self): 1529 """ 1530 Get the effective level for this logger. 1531 1532 Loop through this logger and its parents in the logger hierarchy, 1533 looking for a non-zero logging level. Return the first one found. 1534 """ 1535 logger = self 1536 while logger: 1537 if logger.level: 1538 return logger.level 1539 logger = logger.parent 1540 return NOTSET 1541 1542 def isEnabledFor(self, level): 1543 """ 1544 Is this logger enabled for level 'level'? 1545 """ 1546 if self.manager.disable >= level: 1547 return False 1548 return level >= self.getEffectiveLevel() 1549 1550 def getChild(self, suffix): 1551 """ 1552 Get a logger which is a descendant to this one. 1553 1554 This is a convenience method, such that 1555 1556 logging.getLogger('abc').getChild('def.ghi') 1557 1558 is the same as 1559 1560 logging.getLogger('abc.def.ghi') 1561 1562 It's useful, for example, when the parent logger is named using 1563 __name__ rather than a literal string. 1564 """ 1565 if self.root is not self: 1566 suffix = '.'.join((self.name, suffix)) 1567 return self.manager.getLogger(suffix) 1568 1569 def __repr__(self): 1570 level = getLevelName(self.getEffectiveLevel()) 1571 return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level) 1572 1573 1574 class RootLogger(Logger): 1575 """ 1576 A root logger is not that different to any other logger, except that 1577 it must have a logging level and there is only one instance of it in 1578 the hierarchy. 1579 """ 1580 def __init__(self, level): 1581 """ 1582 Initialize the logger with the name "root". 1583 """ 1584 Logger.__init__(self, "root", level) 1585 1586 _loggerClass = Logger 1587 1588 class LoggerAdapter(object): 1589 """ 1590 An adapter for loggers which makes it easier to specify contextual 1591 information in logging output. 1592 """ 1593 1594 def __init__(self, logger, extra): 1595 """ 1596 Initialize the adapter with a logger and a dict-like object which 1597 provides contextual information. This constructor signature allows 1598 easy stacking of LoggerAdapters, if so desired. 1599 1600 You can effectively pass keyword arguments as shown in the 1601 following example: 1602 1603 adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) 1604 """ 1605 self.logger = logger 1606 self.extra = extra 1607 1608 def process(self, msg, kwargs): 1609 """ 1610 Process the logging message and keyword arguments passed in to 1611 a logging call to insert contextual information. You can either 1612 manipulate the message itself, the keyword args or both. Return 1613 the message and kwargs modified (or not) to suit your needs. 1614 1615 Normally, you'll only need to override this one method in a 1616 LoggerAdapter subclass for your specific needs. 1617 """ 1618 kwargs["extra"] = self.extra 1619 return msg, kwargs 1620 1621 # 1622 # Boilerplate convenience methods 1623 # 1624 def debug(self, msg, *args, **kwargs): 1625 """ 1626 Delegate a debug call to the underlying logger. 1627 """ 1628 self.log(DEBUG, msg, *args, **kwargs) 1629 1630 def info(self, msg, *args, **kwargs): 1631 """ 1632 Delegate an info call to the underlying logger. 1633 """ 1634 self.log(INFO, msg, *args, **kwargs) 1635 1636 def warning(self, msg, *args, **kwargs): 1637 """ 1638 Delegate a warning call to the underlying logger. 1639 """ 1640 self.log(WARNING, msg, *args, **kwargs) 1641 1642 def warn(self, msg, *args, **kwargs): 1643 warnings.warn("The 'warn' method is deprecated, " 1644 "use 'warning' instead", DeprecationWarning, 2) 1645 self.warning(msg, *args, **kwargs) 1646 1647 def error(self, msg, *args, **kwargs): 1648 """ 1649 Delegate an error call to the underlying logger. 1650 """ 1651 self.log(ERROR, msg, *args, **kwargs) 1652 1653 def exception(self, msg, *args, exc_info=True, **kwargs): 1654 """ 1655 Delegate an exception call to the underlying logger. 1656 """ 1657 self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs) 1658 1659 def critical(self, msg, *args, **kwargs): 1660 """ 1661 Delegate a critical call to the underlying logger. 1662 """ 1663 self.log(CRITICAL, msg, *args, **kwargs) 1664 1665 def log(self, level, msg, *args, **kwargs): 1666 """ 1667 Delegate a log call to the underlying logger, after adding 1668 contextual information from this adapter instance. 1669 """ 1670 if self.isEnabledFor(level): 1671 msg, kwargs = self.process(msg, kwargs) 1672 self.logger.log(level, msg, *args, **kwargs) 1673 1674 def isEnabledFor(self, level): 1675 """ 1676 Is this logger enabled for level 'level'? 1677 """ 1678 if self.logger.manager.disable >= level: 1679 return False 1680 return level >= self.getEffectiveLevel() 1681 1682 def setLevel(self, level): 1683 """ 1684 Set the specified level on the underlying logger. 1685 """ 1686 self.logger.setLevel(level) 1687 1688 def getEffectiveLevel(self): 1689 """ 1690 Get the effective level for the underlying logger. 1691 """ 1692 return self.logger.getEffectiveLevel() 1693 1694 def hasHandlers(self): 1695 """ 1696 See if the underlying logger has any handlers. 1697 """ 1698 return self.logger.hasHandlers() 1699 1700 def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False): 1701 """ 1702 Low-level log implementation, proxied to allow nested logger adapters. 1703 """ 1704 return self.logger._log( 1705 level, 1706 msg, 1707 args, 1708 exc_info=exc_info, 1709 extra=extra, 1710 stack_info=stack_info, 1711 ) 1712 1713 @property 1714 def manager(self): 1715 return self.logger.manager 1716 1717 @manager.setter 1718 def manager(self, value): 1719 self.logger.manager = value 1720 1721 @property 1722 def name(self): 1723 return self.logger.name 1724 1725 def __repr__(self): 1726 logger = self.logger 1727 level = getLevelName(logger.getEffectiveLevel()) 1728 return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level) 1729 1730 root = RootLogger(WARNING) 1731 Logger.root = root 1732 Logger.manager = Manager(Logger.root) 1733 1734 #--------------------------------------------------------------------------- 1735 # Configuration classes and functions 1736 #--------------------------------------------------------------------------- 1737 1738 def basicConfig(**kwargs): 1739 """ 1740 Do basic configuration for the logging system. 1741 1742 This function does nothing if the root logger already has handlers 1743 configured. It is a convenience method intended for use by simple scripts 1744 to do one-shot configuration of the logging package. 1745 1746 The default behaviour is to create a StreamHandler which writes to 1747 sys.stderr, set a formatter using the BASIC_FORMAT format string, and 1748 add the handler to the root logger. 1749 1750 A number of optional keyword arguments may be specified, which can alter 1751 the default behaviour. 1752 1753 filename Specifies that a FileHandler be created, using the specified 1754 filename, rather than a StreamHandler. 1755 filemode Specifies the mode to open the file, if filename is specified 1756 (if filemode is unspecified, it defaults to 'a'). 1757 format Use the specified format string for the handler. 1758 datefmt Use the specified date/time format. 1759 style If a format string is specified, use this to specify the 1760 type of format string (possible values '%', '{', '$', for 1761 %-formatting, :meth:`str.format` and :class:`string.Template` 1762 - defaults to '%'). 1763 level Set the root logger level to the specified level. 1764 stream Use the specified stream to initialize the StreamHandler. Note 1765 that this argument is incompatible with 'filename' - if both 1766 are present, 'stream' is ignored. 1767 handlers If specified, this should be an iterable of already created 1768 handlers, which will be added to the root handler. Any handler 1769 in the list which does not have a formatter assigned will be 1770 assigned the formatter created in this function. 1771 1772 Note that you could specify a stream created using open(filename, mode) 1773 rather than passing the filename and mode in. However, it should be 1774 remembered that StreamHandler does not close its stream (since it may be 1775 using sys.stdout or sys.stderr), whereas FileHandler closes its stream 1776 when the handler is closed. 1777 1778 .. versionchanged:: 3.2 1779 Added the ``style`` parameter. 1780 1781 .. versionchanged:: 3.3 1782 Added the ``handlers`` parameter. A ``ValueError`` is now thrown for 1783 incompatible arguments (e.g. ``handlers`` specified together with 1784 ``filename``/``filemode``, or ``filename``/``filemode`` specified 1785 together with ``stream``, or ``handlers`` specified together with 1786 ``stream``. 1787 """ 1788 # Add thread safety in case someone mistakenly calls 1789 # basicConfig() from multiple threads 1790 _acquireLock() 1791 try: 1792 if len(root.handlers) == 0: 1793 handlers = kwargs.pop("handlers", None) 1794 if handlers is None: 1795 if "stream" in kwargs and "filename" in kwargs: 1796 raise ValueError("'stream' and 'filename' should not be " 1797 "specified together") 1798 else: 1799 if "stream" in kwargs or "filename" in kwargs: 1800 raise ValueError("'stream' or 'filename' should not be " 1801 "specified together with 'handlers'") 1802 if handlers is None: 1803 filename = kwargs.pop("filename", None) 1804 mode = kwargs.pop("filemode", 'a') 1805 if filename: 1806 h = FileHandler(filename, mode) 1807 else: 1808 stream = kwargs.pop("stream", None) 1809 h = StreamHandler(stream) 1810 handlers = [h] 1811 dfs = kwargs.pop("datefmt", None) 1812 style = kwargs.pop("style", '%') 1813 if style not in _STYLES: 1814 raise ValueError('Style must be one of: %s' % ','.join( 1815 _STYLES.keys())) 1816 fs = kwargs.pop("format", _STYLES[style][1]) 1817 fmt = Formatter(fs, dfs, style) 1818 for h in handlers: 1819 if h.formatter is None: 1820 h.setFormatter(fmt) 1821 root.addHandler(h) 1822 level = kwargs.pop("level", None) 1823 if level is not None: 1824 root.setLevel(level) 1825 if kwargs: 1826 keys = ', '.join(kwargs.keys()) 1827 raise ValueError('Unrecognised argument(s): %s' % keys) 1828 finally: 1829 _releaseLock() 1830 1831 #--------------------------------------------------------------------------- 1832 # Utility functions at module level. 1833 # Basically delegate everything to the root logger. 1834 #--------------------------------------------------------------------------- 1835 1836 def getLogger(name=None): 1837 """ 1838 Return a logger with the specified name, creating it if necessary. 1839 1840 If no name is specified, return the root logger. 1841 """ 1842 if name: 1843 return Logger.manager.getLogger(name) 1844 else: 1845 return root 1846 1847 def critical(msg, *args, **kwargs): 1848 """ 1849 Log a message with severity 'CRITICAL' on the root logger. If the logger 1850 has no handlers, call basicConfig() to add a console handler with a 1851 pre-defined format. 1852 """ 1853 if len(root.handlers) == 0: 1854 basicConfig() 1855 root.critical(msg, *args, **kwargs) 1856 1857 fatal = critical 1858 1859 def error(msg, *args, **kwargs): 1860 """ 1861 Log a message with severity 'ERROR' on the root logger. If the logger has 1862 no handlers, call basicConfig() to add a console handler with a pre-defined 1863 format. 1864 """ 1865 if len(root.handlers) == 0: 1866 basicConfig() 1867 root.error(msg, *args, **kwargs) 1868 1869 def exception(msg, *args, exc_info=True, **kwargs): 1870 """ 1871 Log a message with severity 'ERROR' on the root logger, with exception 1872 information. If the logger has no handlers, basicConfig() is called to add 1873 a console handler with a pre-defined format. 1874 """ 1875 error(msg, *args, exc_info=exc_info, **kwargs) 1876 1877 def warning(msg, *args, **kwargs): 1878 """ 1879 Log a message with severity 'WARNING' on the root logger. If the logger has 1880 no handlers, call basicConfig() to add a console handler with a pre-defined 1881 format. 1882 """ 1883 if len(root.handlers) == 0: 1884 basicConfig() 1885 root.warning(msg, *args, **kwargs) 1886 1887 def warn(msg, *args, **kwargs): 1888 warnings.warn("The 'warn' function is deprecated, " 1889 "use 'warning' instead", DeprecationWarning, 2) 1890 warning(msg, *args, **kwargs) 1891 1892 def info(msg, *args, **kwargs): 1893 """ 1894 Log a message with severity 'INFO' on the root logger. If the logger has 1895 no handlers, call basicConfig() to add a console handler with a pre-defined 1896 format. 1897 """ 1898 if len(root.handlers) == 0: 1899 basicConfig() 1900 root.info(msg, *args, **kwargs) 1901 1902 def debug(msg, *args, **kwargs): 1903 """ 1904 Log a message with severity 'DEBUG' on the root logger. If the logger has 1905 no handlers, call basicConfig() to add a console handler with a pre-defined 1906 format. 1907 """ 1908 if len(root.handlers) == 0: 1909 basicConfig() 1910 root.debug(msg, *args, **kwargs) 1911 1912 def log(level, msg, *args, **kwargs): 1913 """ 1914 Log 'msg % args' with the integer severity 'level' on the root logger. If 1915 the logger has no handlers, call basicConfig() to add a console handler 1916 with a pre-defined format. 1917 """ 1918 if len(root.handlers) == 0: 1919 basicConfig() 1920 root.log(level, msg, *args, **kwargs) 1921 1922 def disable(level): 1923 """ 1924 Disable all logging calls of severity 'level' and below. 1925 """ 1926 root.manager.disable = level 1927 1928 def shutdown(handlerList=_handlerList): 1929 """ 1930 Perform any cleanup actions in the logging system (e.g. flushing 1931 buffers). 1932 1933 Should be called at application exit. 1934 """ 1935 for wr in reversed(handlerList[:]): 1936 #errors might occur, for example, if files are locked 1937 #we just ignore them if raiseExceptions is not set 1938 try: 1939 h = wr() 1940 if h: 1941 try: 1942 h.acquire() 1943 h.flush() 1944 h.close() 1945 except (OSError, ValueError): 1946 # Ignore errors which might be caused 1947 # because handlers have been closed but 1948 # references to them are still around at 1949 # application exit. 1950 pass 1951 finally: 1952 h.release() 1953 except: # ignore everything, as we're shutting down 1954 if raiseExceptions: 1955 raise 1956 #else, swallow 1957 1958 #Let's try and shutdown automatically on application exit... 1959 import atexit 1960 atexit.register(shutdown) 1961 1962 # Null handler 1963 1964 class NullHandler(Handler): 1965 """ 1966 This handler does nothing. It's intended to be used to avoid the 1967 "No handlers could be found for logger XXX" one-off warning. This is 1968 important for library code, which may contain code to log events. If a user 1969 of the library does not configure logging, the one-off warning might be 1970 produced; to avoid this, the library developer simply needs to instantiate 1971 a NullHandler and add it to the top-level logger of the library module or 1972 package. 1973 """ 1974 def handle(self, record): 1975 """Stub.""" 1976 1977 def emit(self, record): 1978 """Stub.""" 1979 1980 def createLock(self): 1981 self.lock = None 1982 1983 # Warnings integration 1984 1985 _warnings_showwarning = None 1986 1987 def _showwarning(message, category, filename, lineno, file=None, line=None): 1988 """ 1989 Implementation of showwarnings which redirects to logging, which will first 1990 check to see if the file parameter is None. If a file is specified, it will 1991 delegate to the original warnings implementation of showwarning. Otherwise, 1992 it will call warnings.formatwarning and will log the resulting string to a 1993 warnings logger named "py.warnings" with level logging.WARNING. 1994 """ 1995 if file is not None: 1996 if _warnings_showwarning is not None: 1997 _warnings_showwarning(message, category, filename, lineno, file, line) 1998 else: 1999 s = warnings.formatwarning(message, category, filename, lineno, line) 2000 logger = getLogger("py.warnings") 2001 if not logger.handlers: 2002 logger.addHandler(NullHandler()) 2003 logger.warning("%s", s) 2004 2005 def captureWarnings(capture): 2006 """ 2007 If capture is true, redirect all warnings to the logging package. 2008 If capture is False, ensure that warnings are not redirected to logging 2009 but to their original destinations. 2010 """ 2011 global _warnings_showwarning 2012 if capture: 2013 if _warnings_showwarning is None: 2014 _warnings_showwarning = warnings.showwarning 2015 warnings.showwarning = _showwarning 2016 else: 2017 if _warnings_showwarning is not None: 2018 warnings.showwarning = _warnings_showwarning 2019 _warnings_showwarning = None
1 # Copyright 2001-2014 by Vinay Sajip. All Rights Reserved. 2 # 3 # Permission to use, copy, modify, and distribute this software and its 4 # documentation for any purpose and without fee is hereby granted, 5 # provided that the above copyright notice appear in all copies and that 6 # both that copyright notice and this permission notice appear in 7 # supporting documentation, and that the name of Vinay Sajip 8 # not be used in advertising or publicity pertaining to distribution 9 # of the software without specific, written prior permission. 10 # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING 11 # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL 12 # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR 13 # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER 14 # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 15 # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 17 """ 18 Configuration functions for the logging package for Python. The core package 19 is based on PEP 282 and comments thereto in comp.lang.python, and influenced 20 by Apache's log4j system. 21 22 Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved. 23 24 To use, simply 'import logging' and log away! 25 """ 26 27 import errno 28 import io 29 import logging 30 import logging.handlers 31 import re 32 import struct 33 import sys 34 import traceback 35 36 try: 37 import _thread as thread 38 import threading 39 except ImportError: #pragma: no cover 40 thread = None 41 42 from socketserver import ThreadingTCPServer, StreamRequestHandler 43 44 45 DEFAULT_LOGGING_CONFIG_PORT = 9030 46 47 RESET_ERROR = errno.ECONNRESET 48 49 # 50 # The following code implements a socket listener for on-the-fly 51 # reconfiguration of logging. 52 # 53 # _listener holds the server object doing the listening 54 _listener = None 55 56 def fileConfig(fname, defaults=None, disable_existing_loggers=True): 57 """ 58 Read the logging configuration from a ConfigParser-format file. 59 60 This can be called several times from an application, allowing an end user 61 the ability to select from various pre-canned configurations (if the 62 developer provides a mechanism to present the choices and load the chosen 63 configuration). 64 """ 65 import configparser 66 67 if isinstance(fname, configparser.RawConfigParser): 68 cp = fname 69 else: 70 cp = configparser.ConfigParser(defaults) 71 if hasattr(fname, 'readline'): 72 cp.read_file(fname) 73 else: 74 cp.read(fname) 75 76 formatters = _create_formatters(cp) 77 78 # critical section 79 logging._acquireLock() 80 try: 81 logging._handlers.clear() 82 del logging._handlerList[:] 83 # Handlers add themselves to logging._handlers 84 handlers = _install_handlers(cp, formatters) 85 _install_loggers(cp, handlers, disable_existing_loggers) 86 finally: 87 logging._releaseLock() 88 89 90 def _resolve(name): 91 """Resolve a dotted name to a global object.""" 92 name = name.split('.') 93 used = name.pop(0) 94 found = __import__(used) 95 for n in name: 96 used = used + '.' + n 97 try: 98 found = getattr(found, n) 99 except AttributeError: 100 __import__(used) 101 found = getattr(found, n) 102 return found 103 104 def _strip_spaces(alist): 105 return map(lambda x: x.strip(), alist) 106 107 def _create_formatters(cp): 108 """Create and return formatters""" 109 flist = cp["formatters"]["keys"] 110 if not len(flist): 111 return {} 112 flist = flist.split(",") 113 flist = _strip_spaces(flist) 114 formatters = {} 115 for form in flist: 116 sectname = "formatter_%s" % form 117 fs = cp.get(sectname, "format", raw=True, fallback=None) 118 dfs = cp.get(sectname, "datefmt", raw=True, fallback=None) 119 stl = cp.get(sectname, "style", raw=True, fallback='%') 120 c = logging.Formatter 121 class_name = cp[sectname].get("class") 122 if class_name: 123 c = _resolve(class_name) 124 f = c(fs, dfs, stl) 125 formatters[form] = f 126 return formatters 127 128 129 def _install_handlers(cp, formatters): 130 """Install and return handlers""" 131 hlist = cp["handlers"]["keys"] 132 if not len(hlist): 133 return {} 134 hlist = hlist.split(",") 135 hlist = _strip_spaces(hlist) 136 handlers = {} 137 fixups = [] #for inter-handler references 138 for hand in hlist: 139 section = cp["handler_%s" % hand] 140 klass = section["class"] 141 fmt = section.get("formatter", "") 142 try: 143 klass = eval(klass, vars(logging)) 144 except (AttributeError, NameError): 145 klass = _resolve(klass) 146 args = section["args"] 147 args = eval(args, vars(logging)) 148 h = klass(*args) 149 if "level" in section: 150 level = section["level"] 151 h.setLevel(level) 152 if len(fmt): 153 h.setFormatter(formatters[fmt]) 154 if issubclass(klass, logging.handlers.MemoryHandler): 155 target = section.get("target", "") 156 if len(target): #the target handler may not be loaded yet, so keep for later... 157 fixups.append((h, target)) 158 handlers[hand] = h 159 #now all handlers are loaded, fixup inter-handler references... 160 for h, t in fixups: 161 h.setTarget(handlers[t]) 162 return handlers 163 164 def _handle_existing_loggers(existing, child_loggers, disable_existing): 165 """ 166 When (re)configuring logging, handle loggers which were in the previous 167 configuration but are not in the new configuration. There's no point 168 deleting them as other threads may continue to hold references to them; 169 and by disabling them, you stop them doing any logging. 170 171 However, don't disable children of named loggers, as that's probably not 172 what was intended by the user. Also, allow existing loggers to NOT be 173 disabled if disable_existing is false. 174 """ 175 root = logging.root 176 for log in existing: 177 logger = root.manager.loggerDict[log] 178 if log in child_loggers: 179 logger.level = logging.NOTSET 180 logger.handlers = [] 181 logger.propagate = True 182 else: 183 logger.disabled = disable_existing 184 185 def _install_loggers(cp, handlers, disable_existing): 186 """Create and install loggers""" 187 188 # configure the root first 189 llist = cp["loggers"]["keys"] 190 llist = llist.split(",") 191 llist = list(map(lambda x: x.strip(), llist)) 192 llist.remove("root") 193 section = cp["logger_root"] 194 root = logging.root 195 log = root 196 if "level" in section: 197 level = section["level"] 198 log.setLevel(level) 199 for h in root.handlers[:]: 200 root.removeHandler(h) 201 hlist = section["handlers"] 202 if len(hlist): 203 hlist = hlist.split(",") 204 hlist = _strip_spaces(hlist) 205 for hand in hlist: 206 log.addHandler(handlers[hand]) 207 208 #and now the others... 209 #we don't want to lose the existing loggers, 210 #since other threads may have pointers to them. 211 #existing is set to contain all existing loggers, 212 #and as we go through the new configuration we 213 #remove any which are configured. At the end, 214 #what's left in existing is the set of loggers 215 #which were in the previous configuration but 216 #which are not in the new configuration. 217 existing = list(root.manager.loggerDict.keys()) 218 #The list needs to be sorted so that we can 219 #avoid disabling child loggers of explicitly 220 #named loggers. With a sorted list it is easier 221 #to find the child loggers. 222 existing.sort() 223 #We'll keep the list of existing loggers 224 #which are children of named loggers here... 225 child_loggers = [] 226 #now set up the new ones... 227 for log in llist: 228 section = cp["logger_%s" % log] 229 qn = section["qualname"] 230 propagate = section.getint("propagate", fallback=1) 231 logger = logging.getLogger(qn) 232 if qn in existing: 233 i = existing.index(qn) + 1 # start with the entry after qn 234 prefixed = qn + "." 235 pflen = len(prefixed) 236 num_existing = len(existing) 237 while i < num_existing: 238 if existing[i][:pflen] == prefixed: 239 child_loggers.append(existing[i]) 240 i += 1 241 existing.remove(qn) 242 if "level" in section: 243 level = section["level"] 244 logger.setLevel(level) 245 for h in logger.handlers[:]: 246 logger.removeHandler(h) 247 logger.propagate = propagate 248 logger.disabled = 0 249 hlist = section["handlers"] 250 if len(hlist): 251 hlist = hlist.split(",") 252 hlist = _strip_spaces(hlist) 253 for hand in hlist: 254 logger.addHandler(handlers[hand]) 255 256 #Disable any old loggers. There's no point deleting 257 #them as other threads may continue to hold references 258 #and by disabling them, you stop them doing any logging. 259 #However, don't disable children of named loggers, as that's 260 #probably not what was intended by the user. 261 #for log in existing: 262 # logger = root.manager.loggerDict[log] 263 # if log in child_loggers: 264 # logger.level = logging.NOTSET 265 # logger.handlers = [] 266 # logger.propagate = 1 267 # elif disable_existing_loggers: 268 # logger.disabled = 1 269 _handle_existing_loggers(existing, child_loggers, disable_existing) 270 271 IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) 272 273 274 def valid_ident(s): 275 m = IDENTIFIER.match(s) 276 if not m: 277 raise ValueError('Not a valid Python identifier: %r' % s) 278 return True 279 280 281 class ConvertingMixin(object): 282 """For ConvertingXXX's, this mixin class provides common functions""" 283 284 def convert_with_key(self, key, value, replace=True): 285 result = self.configurator.convert(value) 286 #If the converted value is different, save for next time 287 if value is not result: 288 if replace: 289 self[key] = result 290 if type(result) in (ConvertingDict, ConvertingList, 291 ConvertingTuple): 292 result.parent = self 293 result.key = key 294 return result 295 296 def convert(self, value): 297 result = self.configurator.convert(value) 298 if value is not result: 299 if type(result) in (ConvertingDict, ConvertingList, 300 ConvertingTuple): 301 result.parent = self 302 return result 303 304 305 # The ConvertingXXX classes are wrappers around standard Python containers, 306 # and they serve to convert any suitable values in the container. The 307 # conversion converts base dicts, lists and tuples to their wrapped 308 # equivalents, whereas strings which match a conversion format are converted 309 # appropriately. 310 # 311 # Each wrapper should have a configurator attribute holding the actual 312 # configurator to use for conversion. 313 314 class ConvertingDict(dict, ConvertingMixin): 315 """A converting dictionary wrapper.""" 316 317 def __getitem__(self, key): 318 value = dict.__getitem__(self, key) 319 return self.convert_with_key(key, value) 320 321 def get(self, key, default=None): 322 value = dict.get(self, key, default) 323 return self.convert_with_key(key, value) 324 325 def pop(self, key, default=None): 326 value = dict.pop(self, key, default) 327 return self.convert_with_key(key, value, replace=False) 328 329 class ConvertingList(list, ConvertingMixin): 330 """A converting list wrapper.""" 331 def __getitem__(self, key): 332 value = list.__getitem__(self, key) 333 return self.convert_with_key(key, value) 334 335 def pop(self, idx=-1): 336 value = list.pop(self, idx) 337 return self.convert(value) 338 339 class ConvertingTuple(tuple, ConvertingMixin): 340 """A converting tuple wrapper.""" 341 def __getitem__(self, key): 342 value = tuple.__getitem__(self, key) 343 # Can't replace a tuple entry. 344 return self.convert_with_key(key, value, replace=False) 345 346 class BaseConfigurator(object): 347 """ 348 The configurator base class which defines some useful defaults. 349 """ 350 351 CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$') 352 353 WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') 354 DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') 355 INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') 356 DIGIT_PATTERN = re.compile(r'^\d+$') 357 358 value_converters = { 359 'ext' : 'ext_convert', 360 'cfg' : 'cfg_convert', 361 } 362 363 # We might want to use a different one, e.g. importlib 364 importer = staticmethod(__import__) 365 366 def __init__(self, config): 367 self.config = ConvertingDict(config) 368 self.config.configurator = self 369 370 def resolve(self, s): 371 """ 372 Resolve strings to objects using standard import and attribute 373 syntax. 374 """ 375 name = s.split('.') 376 used = name.pop(0) 377 try: 378 found = self.importer(used) 379 for frag in name: 380 used += '.' + frag 381 try: 382 found = getattr(found, frag) 383 except AttributeError: 384 self.importer(used) 385 found = getattr(found, frag) 386 return found 387 except ImportError: 388 e, tb = sys.exc_info()[1:] 389 v = ValueError('Cannot resolve %r: %s' % (s, e)) 390 v.__cause__, v.__traceback__ = e, tb 391 raise v 392 393 def ext_convert(self, value): 394 """Default converter for the ext:// protocol.""" 395 return self.resolve(value) 396 397 def cfg_convert(self, value): 398 """Default converter for the cfg:// protocol.""" 399 rest = value 400 m = self.WORD_PATTERN.match(rest) 401 if m is None: 402 raise ValueError("Unable to convert %r" % value) 403 else: 404 rest = rest[m.end():] 405 d = self.config[m.groups()[0]] 406 #print d, rest 407 while rest: 408 m = self.DOT_PATTERN.match(rest) 409 if m: 410 d = d[m.groups()[0]] 411 else: 412 m = self.INDEX_PATTERN.match(rest) 413 if m: 414 idx = m.groups()[0] 415 if not self.DIGIT_PATTERN.match(idx): 416 d = d[idx] 417 else: 418 try: 419 n = int(idx) # try as number first (most likely) 420 d = d[n] 421 except TypeError: 422 d = d[idx] 423 if m: 424 rest = rest[m.end():] 425 else: 426 raise ValueError('Unable to convert ' 427 '%r at %r' % (value, rest)) 428 #rest should be empty 429 return d 430 431 def convert(self, value): 432 """ 433 Convert values to an appropriate type. dicts, lists and tuples are 434 replaced by their converting alternatives. Strings are checked to 435 see if they have a conversion format and are converted if they do. 436 """ 437 if not isinstance(value, ConvertingDict) and isinstance(value, dict): 438 value = ConvertingDict(value) 439 value.configurator = self 440 elif not isinstance(value, ConvertingList) and isinstance(value, list): 441 value = ConvertingList(value) 442 value.configurator = self 443 elif not isinstance(value, ConvertingTuple) and\ 444 isinstance(value, tuple): 445 value = ConvertingTuple(value) 446 value.configurator = self 447 elif isinstance(value, str): # str for py3k 448 m = self.CONVERT_PATTERN.match(value) 449 if m: 450 d = m.groupdict() 451 prefix = d['prefix'] 452 converter = self.value_converters.get(prefix, None) 453 if converter: 454 suffix = d['suffix'] 455 converter = getattr(self, converter) 456 value = converter(suffix) 457 return value 458 459 def configure_custom(self, config): 460 """Configure an object with a user-supplied factory.""" 461 c = config.pop('()') 462 if not callable(c): 463 c = self.resolve(c) 464 props = config.pop('.', None) 465 # Check for valid identifiers 466 kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) 467 result = c(**kwargs) 468 if props: 469 for name, value in props.items(): 470 setattr(result, name, value) 471 return result 472 473 def as_tuple(self, value): 474 """Utility function which converts lists to tuples.""" 475 if isinstance(value, list): 476 value = tuple(value) 477 return value 478 479 class DictConfigurator(BaseConfigurator): 480 """ 481 Configure logging using a dictionary-like object to describe the 482 configuration. 483 """ 484 485 def configure(self): 486 """Do the configuration.""" 487 488 config = self.config 489 if 'version' not in config: 490 raise ValueError("dictionary doesn't specify a version") 491 if config['version'] != 1: 492 raise ValueError("Unsupported version: %s" % config['version']) 493 incremental = config.pop('incremental', False) 494 EMPTY_DICT = {} 495 logging._acquireLock() 496 try: 497 if incremental: 498 handlers = config.get('handlers', EMPTY_DICT) 499 for name in handlers: 500 if name not in logging._handlers: 501 raise ValueError('No handler found with ' 502 'name %r' % name) 503 else: 504 try: 505 handler = logging._handlers[name] 506 handler_config = handlers[name] 507 level = handler_config.get('level', None) 508 if level: 509 handler.setLevel(logging._checkLevel(level)) 510 except Exception as e: 511 raise ValueError('Unable to configure handler ' 512 '%r: %s' % (name, e)) 513 loggers = config.get('loggers', EMPTY_DICT) 514 for name in loggers: 515 try: 516 self.configure_logger(name, loggers[name], True) 517 except Exception as e: 518 raise ValueError('Unable to configure logger ' 519 '%r: %s' % (name, e)) 520 root = config.get('root', None) 521 if root: 522 try: 523 self.configure_root(root, True) 524 except Exception as e: 525 raise ValueError('Unable to configure root ' 526 'logger: %s' % e) 527 else: 528 disable_existing = config.pop('disable_existing_loggers', True) 529 530 logging._handlers.clear() 531 del logging._handlerList[:] 532 533 # Do formatters first - they don't refer to anything else 534 formatters = config.get('formatters', EMPTY_DICT) 535 for name in formatters: 536 try: 537 formatters[name] = self.configure_formatter( 538 formatters[name]) 539 except Exception as e: 540 raise ValueError('Unable to configure ' 541 'formatter %r: %s' % (name, e)) 542 # Next, do filters - they don't refer to anything else, either 543 filters = config.get('filters', EMPTY_DICT) 544 for name in filters: 545 try: 546 filters[name] = self.configure_filter(filters[name]) 547 except Exception as e: 548 raise ValueError('Unable to configure ' 549 'filter %r: %s' % (name, e)) 550 551 # Next, do handlers - they refer to formatters and filters 552 # As handlers can refer to other handlers, sort the keys 553 # to allow a deterministic order of configuration 554 handlers = config.get('handlers', EMPTY_DICT) 555 deferred = [] 556 for name in sorted(handlers): 557 try: 558 handler = self.configure_handler(handlers[name]) 559 handler.name = name 560 handlers[name] = handler 561 except Exception as e: 562 if 'target not configured yet' in str(e): 563 deferred.append(name) 564 else: 565 raise ValueError('Unable to configure handler ' 566 '%r: %s' % (name, e)) 567 568 # Now do any that were deferred 569 for name in deferred: 570 try: 571 handler = self.configure_handler(handlers[name]) 572 handler.name = name 573 handlers[name] = handler 574 except Exception as e: 575 raise ValueError('Unable to configure handler ' 576 '%r: %s' % (name, e)) 577 578 # Next, do loggers - they refer to handlers and filters 579 580 #we don't want to lose the existing loggers, 581 #since other threads may have pointers to them. 582 #existing is set to contain all existing loggers, 583 #and as we go through the new configuration we 584 #remove any which are configured. At the end, 585 #what's left in existing is the set of loggers 586 #which were in the previous configuration but 587 #which are not in the new configuration. 588 root = logging.root 589 existing = list(root.manager.loggerDict.keys()) 590 #The list needs to be sorted so that we can 591 #avoid disabling child loggers of explicitly 592 #named loggers. With a sorted list it is easier 593 #to find the child loggers. 594 existing.sort() 595 #We'll keep the list of existing loggers 596 #which are children of named loggers here... 597 child_loggers = [] 598 #now set up the new ones... 599 loggers = config.get('loggers', EMPTY_DICT) 600 for name in loggers: 601 if name in existing: 602 i = existing.index(name) + 1 # look after name 603 prefixed = name + "." 604 pflen = len(prefixed) 605 num_existing = len(existing) 606 while i < num_existing: 607 if existing[i][:pflen] == prefixed: 608 child_loggers.append(existing[i]) 609 i += 1 610 existing.remove(name) 611 try: 612 self.configure_logger(name, loggers[name]) 613 except Exception as e: 614 raise ValueError('Unable to configure logger ' 615 '%r: %s' % (name, e)) 616 617 #Disable any old loggers. There's no point deleting 618 #them as other threads may continue to hold references 619 #and by disabling them, you stop them doing any logging. 620 #However, don't disable children of named loggers, as that's 621 #probably not what was intended by the user. 622 #for log in existing: 623 # logger = root.manager.loggerDict[log] 624 # if log in child_loggers: 625 # logger.level = logging.NOTSET 626 # logger.handlers = [] 627 # logger.propagate = True 628 # elif disable_existing: 629 # logger.disabled = True 630 _handle_existing_loggers(existing, child_loggers, 631 disable_existing) 632 633 # And finally, do the root logger 634 root = config.get('root', None) 635 if root: 636 try: 637 self.configure_root(root) 638 except Exception as e: 639 raise ValueError('Unable to configure root ' 640 'logger: %s' % e) 641 finally: 642 logging._releaseLock() 643 644 def configure_formatter(self, config): 645 """Configure a formatter from a dictionary.""" 646 if '()' in config: 647 factory = config['()'] # for use in exception handler 648 try: 649 result = self.configure_custom(config) 650 except TypeError as te: 651 if "'format'" not in str(te): 652 raise 653 #Name of parameter changed from fmt to format. 654 #Retry with old name. 655 #This is so that code can be used with older Python versions 656 #(e.g. by Django) 657 config['fmt'] = config.pop('format') 658 config['()'] = factory 659 result = self.configure_custom(config) 660 else: 661 fmt = config.get('format', None) 662 dfmt = config.get('datefmt', None) 663 style = config.get('style', '%') 664 cname = config.get('class', None) 665 if not cname: 666 c = logging.Formatter 667 else: 668 c = _resolve(cname) 669 result = c(fmt, dfmt, style) 670 return result 671 672 def configure_filter(self, config): 673 """Configure a filter from a dictionary.""" 674 if '()' in config: 675 result = self.configure_custom(config) 676 else: 677 name = config.get('name', '') 678 result = logging.Filter(name) 679 return result 680 681 def add_filters(self, filterer, filters): 682 """Add filters to a filterer from a list of names.""" 683 for f in filters: 684 try: 685 filterer.addFilter(self.config['filters'][f]) 686 except Exception as e: 687 raise ValueError('Unable to add filter %r: %s' % (f, e)) 688 689 def configure_handler(self, config): 690 """Configure a handler from a dictionary.""" 691 config_copy = dict(config) # for restoring in case of error 692 formatter = config.pop('formatter', None) 693 if formatter: 694 try: 695 formatter = self.config['formatters'][formatter] 696 except Exception as e: 697 raise ValueError('Unable to set formatter ' 698 '%r: %s' % (formatter, e)) 699 level = config.pop('level', None) 700 filters = config.pop('filters', None) 701 if '()' in config: 702 c = config.pop('()') 703 if not callable(c): 704 c = self.resolve(c) 705 factory = c 706 else: 707 cname = config.pop('class') 708 klass = self.resolve(cname) 709 #Special case for handler which refers to another handler 710 if issubclass(klass, logging.handlers.MemoryHandler) and\ 711 'target' in config: 712 try: 713 th = self.config['handlers'][config['target']] 714 if not isinstance(th, logging.Handler): 715 config.update(config_copy) # restore for deferred cfg 716 raise TypeError('target not configured yet') 717 config['target'] = th 718 except Exception as e: 719 raise ValueError('Unable to set target handler ' 720 '%r: %s' % (config['target'], e)) 721 elif issubclass(klass, logging.handlers.SMTPHandler) and\ 722 'mailhost' in config: 723 config['mailhost'] = self.as_tuple(config['mailhost']) 724 elif issubclass(klass, logging.handlers.SysLogHandler) and\ 725 'address' in config: 726 config['address'] = self.as_tuple(config['address']) 727 factory = klass 728 props = config.pop('.', None) 729 kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) 730 try: 731 result = factory(**kwargs) 732 except TypeError as te: 733 if "'stream'" not in str(te): 734 raise 735 #The argument name changed from strm to stream 736 #Retry with old name. 737 #This is so that code can be used with older Python versions 738 #(e.g. by Django) 739 kwargs['strm'] = kwargs.pop('stream') 740 result = factory(**kwargs) 741 if formatter: 742 result.setFormatter(formatter) 743 if level is not None: 744 result.setLevel(logging._checkLevel(level)) 745 if filters: 746 self.add_filters(result, filters) 747 if props: 748 for name, value in props.items(): 749 setattr(result, name, value) 750 return result 751 752 def add_handlers(self, logger, handlers): 753 """Add handlers to a logger from a list of names.""" 754 for h in handlers: 755 try: 756 logger.addHandler(self.config['handlers'][h]) 757 except Exception as e: 758 raise ValueError('Unable to add handler %r: %s' % (h, e)) 759 760 def common_logger_config(self, logger, config, incremental=False): 761 """ 762 Perform configuration which is common to root and non-root loggers. 763 """ 764 level = config.get('level', None) 765 if level is not None: 766 logger.setLevel(logging._checkLevel(level)) 767 if not incremental: 768 #Remove any existing handlers 769 for h in logger.handlers[:]: 770 logger.removeHandler(h) 771 handlers = config.get('handlers', None) 772 if handlers: 773 self.add_handlers(logger, handlers) 774 filters = config.get('filters', None) 775 if filters: 776 self.add_filters(logger, filters) 777 778 def configure_logger(self, name, config, incremental=False): 779 """Configure a non-root logger from a dictionary.""" 780 logger = logging.getLogger(name) 781 self.common_logger_config(logger, config, incremental) 782 propagate = config.get('propagate', None) 783 if propagate is not None: 784 logger.propagate = propagate 785 786 def configure_root(self, config, incremental=False): 787 """Configure a root logger from a dictionary.""" 788 root = logging.getLogger() 789 self.common_logger_config(root, config, incremental) 790 791 dictConfigClass = DictConfigurator 792 793 def dictConfig(config): 794 """Configure logging using a dictionary.""" 795 dictConfigClass(config).configure() 796 797 798 def listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None): 799 """ 800 Start up a socket server on the specified port, and listen for new 801 configurations. 802 803 These will be sent as a file suitable for processing by fileConfig(). 804 Returns a Thread object on which you can call start() to start the server, 805 and which you can join() when appropriate. To stop the server, call 806 stopListening(). 807 808 Use the ``verify`` argument to verify any bytes received across the wire 809 from a client. If specified, it should be a callable which receives a 810 single argument - the bytes of configuration data received across the 811 network - and it should return either ``None``, to indicate that the 812 passed in bytes could not be verified and should be discarded, or a 813 byte string which is then passed to the configuration machinery as 814 normal. Note that you can return transformed bytes, e.g. by decrypting 815 the bytes passed in. 816 """ 817 if not thread: #pragma: no cover 818 raise NotImplementedError("listen() needs threading to work") 819 820 class ConfigStreamHandler(StreamRequestHandler): 821 """ 822 Handler for a logging configuration request. 823 824 It expects a completely new logging configuration and uses fileConfig 825 to install it. 826 """ 827 def handle(self): 828 """ 829 Handle a request. 830 831 Each request is expected to be a 4-byte length, packed using 832 struct.pack(">L", n), followed by the config file. 833 Uses fileConfig() to do the grunt work. 834 """ 835 try: 836 conn = self.connection 837 chunk = conn.recv(4) 838 if len(chunk) == 4: 839 slen = struct.unpack(">L", chunk)[0] 840 chunk = self.connection.recv(slen) 841 while len(chunk) < slen: 842 chunk = chunk + conn.recv(slen - len(chunk)) 843 if self.server.verify is not None: 844 chunk = self.server.verify(chunk) 845 if chunk is not None: # verified, can process 846 chunk = chunk.decode("utf-8") 847 try: 848 import json 849 d =json.loads(chunk) 850 assert isinstance(d, dict) 851 dictConfig(d) 852 except Exception: 853 #Apply new configuration. 854 855 file = io.StringIO(chunk) 856 try: 857 fileConfig(file) 858 except Exception: 859 traceback.print_exc() 860 if self.server.ready: 861 self.server.ready.set() 862 except OSError as e: 863 if e.errno != RESET_ERROR: 864 raise 865 866 class ConfigSocketReceiver(ThreadingTCPServer): 867 """ 868 A simple TCP socket-based logging config receiver. 869 """ 870 871 allow_reuse_address = 1 872 873 def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, 874 handler=None, ready=None, verify=None): 875 ThreadingTCPServer.__init__(self, (host, port), handler) 876 logging._acquireLock() 877 self.abort = 0 878 logging._releaseLock() 879 self.timeout = 1 880 self.ready = ready 881 self.verify = verify 882 883 def serve_until_stopped(self): 884 import select 885 abort = 0 886 while not abort: 887 rd, wr, ex = select.select([self.socket.fileno()], 888 [], [], 889 self.timeout) 890 if rd: 891 self.handle_request() 892 logging._acquireLock() 893 abort = self.abort 894 logging._releaseLock() 895 self.socket.close() 896 897 class Server(threading.Thread): 898 899 def __init__(self, rcvr, hdlr, port, verify): 900 super(Server, self).__init__() 901 self.rcvr = rcvr 902 self.hdlr = hdlr 903 self.port = port 904 self.verify = verify 905 self.ready = threading.Event() 906 907 def run(self): 908 server = self.rcvr(port=self.port, handler=self.hdlr, 909 ready=self.ready, 910 verify=self.verify) 911 if self.port == 0: 912 self.port = server.server_address[1] 913 self.ready.set() 914 global _listener 915 logging._acquireLock() 916 _listener = server 917 logging._releaseLock() 918 server.serve_until_stopped() 919 920 return Server(ConfigSocketReceiver, ConfigStreamHandler, port, verify) 921 922 def stopListening(): 923 """ 924 Stop the listening server which was created with a call to listen(). 925 """ 926 global _listener 927 logging._acquireLock() 928 try: 929 if _listener: 930 _listener.abort = 1 931 _listener = None 932 finally: 933 logging._releaseLock()
1 # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved. 2 # 3 # Permission to use, copy, modify, and distribute this software and its 4 # documentation for any purpose and without fee is hereby granted, 5 # provided that the above copyright notice appear in all copies and that 6 # both that copyright notice and this permission notice appear in 7 # supporting documentation, and that the name of Vinay Sajip 8 # not be used in advertising or publicity pertaining to distribution 9 # of the software without specific, written prior permission. 10 # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING 11 # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL 12 # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR 13 # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER 14 # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 15 # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 17 """ 18 Additional handlers for the logging package for Python. The core package is 19 based on PEP 282 and comments thereto in comp.lang.python. 20 21 Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved. 22 23 To use, simply 'import logging.handlers' and log away! 24 """ 25 26 import logging, socket, os, pickle, struct, time, re 27 from stat import ST_DEV, ST_INO, ST_MTIME 28 import queue 29 try: 30 import threading 31 except ImportError: #pragma: no cover 32 threading = None 33 34 # 35 # Some constants... 36 # 37 38 DEFAULT_TCP_LOGGING_PORT = 9020 39 DEFAULT_UDP_LOGGING_PORT = 9021 40 DEFAULT_HTTP_LOGGING_PORT = 9022 41 DEFAULT_SOAP_LOGGING_PORT = 9023 42 SYSLOG_UDP_PORT = 514 43 SYSLOG_TCP_PORT = 514 44 45 _MIDNIGHT = 24 * 60 * 60 # number of seconds in a day 46 47 class BaseRotatingHandler(logging.FileHandler): 48 """ 49 Base class for handlers that rotate log files at a certain point. 50 Not meant to be instantiated directly. Instead, use RotatingFileHandler 51 or TimedRotatingFileHandler. 52 """ 53 def __init__(self, filename, mode, encoding=None, delay=False): 54 """ 55 Use the specified filename for streamed logging 56 """ 57 logging.FileHandler.__init__(self, filename, mode, encoding, delay) 58 self.mode = mode 59 self.encoding = encoding 60 self.namer = None 61 self.rotator = None 62 63 def emit(self, record): 64 """ 65 Emit a record. 66 67 Output the record to the file, catering for rollover as described 68 in doRollover(). 69 """ 70 try: 71 if self.shouldRollover(record): 72 self.doRollover() 73 logging.FileHandler.emit(self, record) 74 except Exception: 75 self.handleError(record) 76 77 def rotation_filename(self, default_name): 78 """ 79 Modify the filename of a log file when rotating. 80 81 This is provided so that a custom filename can be provided. 82 83 The default implementation calls the 'namer' attribute of the 84 handler, if it's callable, passing the default name to 85 it. If the attribute isn't callable (the default is None), the name 86 is returned unchanged. 87 88 :param default_name: The default name for the log file. 89 """ 90 if not callable(self.namer): 91 result = default_name 92 else: 93 result = self.namer(default_name) 94 return result 95 96 def rotate(self, source, dest): 97 """ 98 When rotating, rotate the current log. 99 100 The default implementation calls the 'rotator' attribute of the 101 handler, if it's callable, passing the source and dest arguments to 102 it. If the attribute isn't callable (the default is None), the source 103 is simply renamed to the destination. 104 105 :param source: The source filename. This is normally the base 106 filename, e.g. 'test.log' 107 :param dest: The destination filename. This is normally 108 what the source is rotated to, e.g. 'test.log.1'. 109 """ 110 if not callable(self.rotator): 111 # Issue 18940: A file may not have been created if delay is True. 112 if os.path.exists(source): 113 os.rename(source, dest) 114 else: 115 self.rotator(source, dest) 116 117 class RotatingFileHandler(BaseRotatingHandler): 118 """ 119 Handler for logging to a set of files, which switches from one file 120 to the next when the current file reaches a certain size. 121 """ 122 def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False): 123 """ 124 Open the specified file and use it as the stream for logging. 125 126 By default, the file grows indefinitely. You can specify particular 127 values of maxBytes and backupCount to allow the file to rollover at 128 a predetermined size. 129 130 Rollover occurs whenever the current log file is nearly maxBytes in 131 length. If backupCount is >= 1, the system will successively create 132 new files with the same pathname as the base file, but with extensions 133 ".1", ".2" etc. appended to it. For example, with a backupCount of 5 134 and a base file name of "app.log", you would get "app.log", 135 "app.log.1", "app.log.2", ... through to "app.log.5". The file being 136 written to is always "app.log" - when it gets filled up, it is closed 137 and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. 138 exist, then they are renamed to "app.log.2", "app.log.3" etc. 139 respectively. 140 141 If maxBytes is zero, rollover never occurs. 142 """ 143 # If rotation/rollover is wanted, it doesn't make sense to use another 144 # mode. If for example 'w' were specified, then if there were multiple 145 # runs of the calling application, the logs from previous runs would be 146 # lost if the 'w' is respected, because the log file would be truncated 147 # on each run. 148 if maxBytes > 0: 149 mode = 'a' 150 BaseRotatingHandler.__init__(self, filename, mode, encoding, delay) 151 self.maxBytes = maxBytes 152 self.backupCount = backupCount 153 154 def doRollover(self): 155 """ 156 Do a rollover, as described in __init__(). 157 """ 158 if self.stream: 159 self.stream.close() 160 self.stream = None 161 if self.backupCount > 0: 162 for i in range(self.backupCount - 1, 0, -1): 163 sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i)) 164 dfn = self.rotation_filename("%s.%d" % (self.baseFilename, 165 i + 1)) 166 if os.path.exists(sfn): 167 if os.path.exists(dfn): 168 os.remove(dfn) 169 os.rename(sfn, dfn) 170 dfn = self.rotation_filename(self.baseFilename + ".1") 171 if os.path.exists(dfn): 172 os.remove(dfn) 173 self.rotate(self.baseFilename, dfn) 174 if not self.delay: 175 self.stream = self._open() 176 177 def shouldRollover(self, record): 178 """ 179 Determine if rollover should occur. 180 181 Basically, see if the supplied record would cause the file to exceed 182 the size limit we have. 183 """ 184 if self.stream is None: # delay was set... 185 self.stream = self._open() 186 if self.maxBytes > 0: # are we rolling over? 187 msg = "%s\n" % self.format(record) 188 self.stream.seek(0, 2) #due to non-posix-compliant Windows feature 189 if self.stream.tell() + len(msg) >= self.maxBytes: 190 return 1 191 return 0 192 193 class TimedRotatingFileHandler(BaseRotatingHandler): 194 """ 195 Handler for logging to a file, rotating the log file at certain timed 196 intervals. 197 198 If backupCount is > 0, when rollover is done, no more than backupCount 199 files are kept - the oldest ones are deleted. 200 """ 201 def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None): 202 BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay) 203 self.when = when.upper() 204 self.backupCount = backupCount 205 self.utc = utc 206 self.atTime = atTime 207 # Calculate the real rollover interval, which is just the number of 208 # seconds between rollovers. Also set the filename suffix used when 209 # a rollover occurs. Current 'when' events supported: 210 # S - Seconds 211 # M - Minutes 212 # H - Hours 213 # D - Days 214 # midnight - roll over at midnight 215 # W{0-6} - roll over on a certain day; 0 - Monday 216 # 217 # Case of the 'when' specifier is not important; lower or upper case 218 # will work. 219 if self.when == 'S': 220 self.interval = 1 # one second 221 self.suffix = "%Y-%m-%d_%H-%M-%S" 222 self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$" 223 elif self.when == 'M': 224 self.interval = 60 # one minute 225 self.suffix = "%Y-%m-%d_%H-%M" 226 self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$" 227 elif self.when == 'H': 228 self.interval = 60 * 60 # one hour 229 self.suffix = "%Y-%m-%d_%H" 230 self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$" 231 elif self.when == 'D' or self.when == 'MIDNIGHT': 232 self.interval = 60 * 60 * 24 # one day 233 self.suffix = "%Y-%m-%d" 234 self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" 235 elif self.when.startswith('W'): 236 self.interval = 60 * 60 * 24 * 7 # one week 237 if len(self.when) != 2: 238 raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when) 239 if self.when[1] < '0' or self.when[1] > '6': 240 raise ValueError("Invalid day specified for weekly rollover: %s" % self.when) 241 self.dayOfWeek = int(self.when[1]) 242 self.suffix = "%Y-%m-%d" 243 self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" 244 else: 245 raise ValueError("Invalid rollover interval specified: %s" % self.when) 246 247 self.extMatch = re.compile(self.extMatch, re.ASCII) 248 self.interval = self.interval * interval # multiply by units requested 249 # The following line added because the filename passed in could be a 250 # path object (see Issue #27493), but self.baseFilename will be a string 251 filename = self.baseFilename 252 if os.path.exists(filename): 253 t = os.stat(filename)[ST_MTIME] 254 else: 255 t = int(time.time()) 256 self.rolloverAt = self.computeRollover(t) 257 258 def computeRollover(self, currentTime): 259 """ 260 Work out the rollover time based on the specified time. 261 """ 262 result = currentTime + self.interval 263 # If we are rolling over at midnight or weekly, then the interval is already known. 264 # What we need to figure out is WHEN the next interval is. In other words, 265 # if you are rolling over at midnight, then your base interval is 1 day, 266 # but you want to start that one day clock at midnight, not now. So, we 267 # have to fudge the rolloverAt value in order to trigger the first rollover 268 # at the right time. After that, the regular interval will take care of 269 # the rest. Note that this code doesn't care about leap seconds. :) 270 if self.when == 'MIDNIGHT' or self.when.startswith('W'): 271 # This could be done with less code, but I wanted it to be clear 272 if self.utc: 273 t = time.gmtime(currentTime) 274 else: 275 t = time.localtime(currentTime) 276 currentHour = t[3] 277 currentMinute = t[4] 278 currentSecond = t[5] 279 currentDay = t[6] 280 # r is the number of seconds left between now and the next rotation 281 if self.atTime is None: 282 rotate_ts = _MIDNIGHT 283 else: 284 rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 + 285 self.atTime.second) 286 287 r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 + 288 currentSecond) 289 if r < 0: 290 # Rotate time is before the current time (for example when 291 # self.rotateAt is 13:45 and it now 14:15), rotation is 292 # tomorrow. 293 r += _MIDNIGHT 294 currentDay = (currentDay + 1) % 7 295 result = currentTime + r 296 # If we are rolling over on a certain day, add in the number of days until 297 # the next rollover, but offset by 1 since we just calculated the time 298 # until the next day starts. There are three cases: 299 # Case 1) The day to rollover is today; in this case, do nothing 300 # Case 2) The day to rollover is further in the interval (i.e., today is 301 # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to 302 # next rollover is simply 6 - 2 - 1, or 3. 303 # Case 3) The day to rollover is behind us in the interval (i.e., today 304 # is day 5 (Saturday) and rollover is on day 3 (Thursday). 305 # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the 306 # number of days left in the current week (1) plus the number 307 # of days in the next week until the rollover day (3). 308 # The calculations described in 2) and 3) above need to have a day added. 309 # This is because the above time calculation takes us to midnight on this 310 # day, i.e. the start of the next day. 311 if self.when.startswith('W'): 312 day = currentDay # 0 is Monday 313 if day != self.dayOfWeek: 314 if day < self.dayOfWeek: 315 daysToWait = self.dayOfWeek - day 316 else: 317 daysToWait = 6 - day + self.dayOfWeek + 1 318 newRolloverAt = result + (daysToWait * (60 * 60 * 24)) 319 if not self.utc: 320 dstNow = t[-1] 321 dstAtRollover = time.localtime(newRolloverAt)[-1] 322 if dstNow != dstAtRollover: 323 if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour 324 addend = -3600 325 else: # DST bows out before next rollover, so we need to add an hour 326 addend = 3600 327 newRolloverAt += addend 328 result = newRolloverAt 329 return result 330 331 def shouldRollover(self, record): 332 """ 333 Determine if rollover should occur. 334 335 record is not used, as we are just comparing times, but it is needed so 336 the method signatures are the same 337 """ 338 t = int(time.time()) 339 if t >= self.rolloverAt: 340 return 1 341 return 0 342 343 def getFilesToDelete(self): 344 """ 345 Determine the files to delete when rolling over. 346 347 More specific than the earlier method, which just used glob.glob(). 348 """ 349 dirName, baseName = os.path.split(self.baseFilename) 350 fileNames = os.listdir(dirName) 351 result = [] 352 prefix = baseName + "." 353 plen = len(prefix) 354 for fileName in fileNames: 355 if fileName[:plen] == prefix: 356 suffix = fileName[plen:] 357 if self.extMatch.match(suffix): 358 result.append(os.path.join(dirName, fileName)) 359 if len(result) < self.backupCount: 360 result = [] 361 else: 362 result.sort() 363 result = result[:len(result) - self.backupCount] 364 return result 365 366 def doRollover(self): 367 """ 368 do a rollover; in this case, a date/time stamp is appended to the filename 369 when the rollover happens. However, you want the file to be named for the 370 start of the interval, not the current time. If there is a backup count, 371 then we have to get a list of matching filenames, sort them and remove 372 the one with the oldest suffix. 373 """ 374 if self.stream: 375 self.stream.close() 376 self.stream = None 377 # get the time that this sequence started at and make it a TimeTuple 378 currentTime = int(time.time()) 379 dstNow = time.localtime(currentTime)[-1] 380 t = self.rolloverAt - self.interval 381 if self.utc: 382 timeTuple = time.gmtime(t) 383 else: 384 timeTuple = time.localtime(t) 385 dstThen = timeTuple[-1] 386 if dstNow != dstThen: 387 if dstNow: 388 addend = 3600 389 else: 390 addend = -3600 391 timeTuple = time.localtime(t + addend) 392 dfn = self.rotation_filename(self.baseFilename + "." + 393 time.strftime(self.suffix, timeTuple)) 394 if os.path.exists(dfn): 395 os.remove(dfn) 396 self.rotate(self.baseFilename, dfn) 397 if self.backupCount > 0: 398 for s in self.getFilesToDelete(): 399 os.remove(s) 400 if not self.delay: 401 self.stream = self._open() 402 newRolloverAt = self.computeRollover(currentTime) 403 while newRolloverAt <= currentTime: 404 newRolloverAt = newRolloverAt + self.interval 405 #If DST changes and midnight or weekly rollover, adjust for this. 406 if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc: 407 dstAtRollover = time.localtime(newRolloverAt)[-1] 408 if dstNow != dstAtRollover: 409 if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour 410 addend = -3600 411 else: # DST bows out before next rollover, so we need to add an hour 412 addend = 3600 413 newRolloverAt += addend 414 self.rolloverAt = newRolloverAt 415 416 class WatchedFileHandler(logging.FileHandler): 417 """ 418 A handler for logging to a file, which watches the file 419 to see if it has changed while in use. This can happen because of 420 usage of programs such as newsyslog and logrotate which perform 421 log file rotation. This handler, intended for use under Unix, 422 watches the file to see if it has changed since the last emit. 423 (A file has changed if its device or inode have changed.) 424 If it has changed, the old file stream is closed, and the file 425 opened to get a new stream. 426 427 This handler is not appropriate for use under Windows, because 428 under Windows open files cannot be moved or renamed - logging 429 opens the files with exclusive locks - and so there is no need 430 for such a handler. Furthermore, ST_INO is not supported under 431 Windows; stat always returns zero for this value. 432 433 This handler is based on a suggestion and patch by Chad J. 434 Schroeder. 435 """ 436 def __init__(self, filename, mode='a', encoding=None, delay=False): 437 logging.FileHandler.__init__(self, filename, mode, encoding, delay) 438 self.dev, self.ino = -1, -1 439 self._statstream() 440 441 def _statstream(self): 442 if self.stream: 443 sres = os.fstat(self.stream.fileno()) 444 self.dev, self.ino = sres[ST_DEV], sres[ST_INO] 445 446 def reopenIfNeeded(self): 447 """ 448 Reopen log file if needed. 449 450 Checks if the underlying file has changed, and if it 451 has, close the old stream and reopen the file to get the 452 current stream. 453 """ 454 # Reduce the chance of race conditions by stat'ing by path only 455 # once and then fstat'ing our new fd if we opened a new log stream. 456 # See issue #14632: Thanks to John Mulligan for the problem report 457 # and patch. 458 try: 459 # stat the file by path, checking for existence 460 sres = os.stat(self.baseFilename) 461 except FileNotFoundError: 462 sres = None 463 # compare file system stat with that of our stream file handle 464 if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino: 465 if self.stream is not None: 466 # we have an open file handle, clean it up 467 self.stream.flush() 468 self.stream.close() 469 self.stream = None # See Issue #21742: _open () might fail. 470 # open a new file handle and get new stat info from that fd 471 self.stream = self._open() 472 self._statstream() 473 474 def emit(self, record): 475 """ 476 Emit a record. 477 478 If underlying file has changed, reopen the file before emitting the 479 record to it. 480 """ 481 self.reopenIfNeeded() 482 logging.FileHandler.emit(self, record) 483 484 485 class SocketHandler(logging.Handler): 486 """ 487 A handler class which writes logging records, in pickle format, to 488 a streaming socket. The socket is kept open across logging calls. 489 If the peer resets it, an attempt is made to reconnect on the next call. 490 The pickle which is sent is that of the LogRecord's attribute dictionary 491 (__dict__), so that the receiver does not need to have the logging module 492 installed in order to process the logging event. 493 494 To unpickle the record at the receiving end into a LogRecord, use the 495 makeLogRecord function. 496 """ 497 498 def __init__(self, host, port): 499 """ 500 Initializes the handler with a specific host address and port. 501 502 When the attribute *closeOnError* is set to True - if a socket error 503 occurs, the socket is silently closed and then reopened on the next 504 logging call. 505 """ 506 logging.Handler.__init__(self) 507 self.host = host 508 self.port = port 509 if port is None: 510 self.address = host 511 else: 512 self.address = (host, port) 513 self.sock = None 514 self.closeOnError = False 515 self.retryTime = None 516 # 517 # Exponential backoff parameters. 518 # 519 self.retryStart = 1.0 520 self.retryMax = 30.0 521 self.retryFactor = 2.0 522 523 def makeSocket(self, timeout=1): 524 """ 525 A factory method which allows subclasses to define the precise 526 type of socket they want. 527 """ 528 if self.port is not None: 529 result = socket.create_connection(self.address, timeout=timeout) 530 else: 531 result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 532 result.settimeout(timeout) 533 try: 534 result.connect(self.address) 535 except OSError: 536 result.close() # Issue 19182 537 raise 538 return result 539 540 def createSocket(self): 541 """ 542 Try to create a socket, using an exponential backoff with 543 a max retry time. Thanks to Robert Olson for the original patch 544 (SF #815911) which has been slightly refactored. 545 """ 546 now = time.time() 547 # Either retryTime is None, in which case this 548 # is the first time back after a disconnect, or 549 # we've waited long enough. 550 if self.retryTime is None: 551 attempt = True 552 else: 553 attempt = (now >= self.retryTime) 554 if attempt: 555 try: 556 self.sock = self.makeSocket() 557 self.retryTime = None # next time, no delay before trying 558 except OSError: 559 #Creation failed, so set the retry time and return. 560 if self.retryTime is None: 561 self.retryPeriod = self.retryStart 562 else: 563 self.retryPeriod = self.retryPeriod * self.retryFactor 564 if self.retryPeriod > self.retryMax: 565 self.retryPeriod = self.retryMax 566 self.retryTime = now + self.retryPeriod 567 568 def send(self, s): 569 """ 570 Send a pickled string to the socket. 571 572 This function allows for partial sends which can happen when the 573 network is busy. 574 """ 575 if self.sock is None: 576 self.createSocket() 577 #self.sock can be None either because we haven't reached the retry 578 #time yet, or because we have reached the retry time and retried, 579 #but are still unable to connect. 580 if self.sock: 581 try: 582 self.sock.sendall(s) 583 except OSError: #pragma: no cover 584 self.sock.close() 585 self.sock = None # so we can call createSocket next time 586 587 def makePickle(self, record): 588 """ 589 Pickles the record in binary format with a length prefix, and 590 returns it ready for transmission across the socket. 591 """ 592 ei = record.exc_info 593 if ei: 594 # just to get traceback text into record.exc_text ... 595 dummy = self.format(record) 596 # See issue #14436: If msg or args are objects, they may not be 597 # available on the receiving end. So we convert the msg % args 598 # to a string, save it as msg and zap the args. 599 d = dict(record.__dict__) 600 d['msg'] = record.getMessage() 601 d['args'] = None 602 d['exc_info'] = None 603 # Issue #25685: delete 'message' if present: redundant with 'msg' 604 d.pop('message', None) 605 s = pickle.dumps(d, 1) 606 slen = struct.pack(">L", len(s)) 607 return slen + s 608 609 def handleError(self, record): 610 """ 611 Handle an error during logging. 612 613 An error has occurred during logging. Most likely cause - 614 connection lost. Close the socket so that we can retry on the 615 next event. 616 """ 617 if self.closeOnError and self.sock: 618 self.sock.close() 619 self.sock = None #try to reconnect next time 620 else: 621 logging.Handler.handleError(self, record) 622 623 def emit(self, record): 624 """ 625 Emit a record. 626 627 Pickles the record and writes it to the socket in binary format. 628 If there is an error with the socket, silently drop the packet. 629 If there was a problem with the socket, re-establishes the 630 socket. 631 """ 632 try: 633 s = self.makePickle(record) 634 self.send(s) 635 except Exception: 636 self.handleError(record) 637 638 def close(self): 639 """ 640 Closes the socket. 641 """ 642 self.acquire() 643 try: 644 sock = self.sock 645 if sock: 646 self.sock = None 647 sock.close() 648 logging.Handler.close(self) 649 finally: 650 self.release() 651 652 class DatagramHandler(SocketHandler): 653 """ 654 A handler class which writes logging records, in pickle format, to 655 a datagram socket. The pickle which is sent is that of the LogRecord's 656 attribute dictionary (__dict__), so that the receiver does not need to 657 have the logging module installed in order to process the logging event. 658 659 To unpickle the record at the receiving end into a LogRecord, use the 660 makeLogRecord function. 661 662 """ 663 def __init__(self, host, port): 664 """ 665 Initializes the handler with a specific host address and port. 666 """ 667 SocketHandler.__init__(self, host, port) 668 self.closeOnError = False 669 670 def makeSocket(self): 671 """ 672 The factory method of SocketHandler is here overridden to create 673 a UDP socket (SOCK_DGRAM). 674 """ 675 if self.port is None: 676 family = socket.AF_UNIX 677 else: 678 family = socket.AF_INET 679 s = socket.socket(family, socket.SOCK_DGRAM) 680 return s 681 682 def send(self, s): 683 """ 684 Send a pickled string to a socket. 685 686 This function no longer allows for partial sends which can happen 687 when the network is busy - UDP does not guarantee delivery and 688 can deliver packets out of sequence. 689 """ 690 if self.sock is None: 691 self.createSocket() 692 self.sock.sendto(s, self.address) 693 694 class SysLogHandler(logging.Handler): 695 """ 696 A handler class which sends formatted logging records to a syslog 697 server. Based on Sam Rushing's syslog module: 698 http://www.nightmare.com/squirl/python-ext/misc/syslog.py 699 Contributed by Nicolas Untz (after which minor refactoring changes 700 have been made). 701 """ 702 703 # from <linux/sys/syslog.h>: 704 # ====================================================================== 705 # priorities/facilities are encoded into a single 32-bit quantity, where 706 # the bottom 3 bits are the priority (0-7) and the top 28 bits are the 707 # facility (0-big number). Both the priorities and the facilities map 708 # roughly one-to-one to strings in the syslogd(8) source code. This 709 # mapping is included in this file. 710 # 711 # priorities (these are ordered) 712 713 LOG_EMERG = 0 # system is unusable 714 LOG_ALERT = 1 # action must be taken immediately 715 LOG_CRIT = 2 # critical conditions 716 LOG_ERR = 3 # error conditions 717 LOG_WARNING = 4 # warning conditions 718 LOG_NOTICE = 5 # normal but significant condition 719 LOG_INFO = 6 # informational 720 LOG_DEBUG = 7 # debug-level messages 721 722 # facility codes 723 LOG_KERN = 0 # kernel messages 724 LOG_USER = 1 # random user-level messages 725 LOG_MAIL = 2 # mail system 726 LOG_DAEMON = 3 # system daemons 727 LOG_AUTH = 4 # security/authorization messages 728 LOG_SYSLOG = 5 # messages generated internally by syslogd 729 LOG_LPR = 6 # line printer subsystem 730 LOG_NEWS = 7 # network news subsystem 731 LOG_UUCP = 8 # UUCP subsystem 732 LOG_CRON = 9 # clock daemon 733 LOG_AUTHPRIV = 10 # security/authorization messages (private) 734 LOG_FTP = 11 # FTP daemon 735 736 # other codes through 15 reserved for system use 737 LOG_LOCAL0 = 16 # reserved for local use 738 LOG_LOCAL1 = 17 # reserved for local use 739 LOG_LOCAL2 = 18 # reserved for local use 740 LOG_LOCAL3 = 19 # reserved for local use 741 LOG_LOCAL4 = 20 # reserved for local use 742 LOG_LOCAL5 = 21 # reserved for local use 743 LOG_LOCAL6 = 22 # reserved for local use 744 LOG_LOCAL7 = 23 # reserved for local use 745 746 priority_names = { 747 "alert": LOG_ALERT, 748 "crit": LOG_CRIT, 749 "critical": LOG_CRIT, 750 "debug": LOG_DEBUG, 751 "emerg": LOG_EMERG, 752 "err": LOG_ERR, 753 "error": LOG_ERR, # DEPRECATED 754 "info": LOG_INFO, 755 "notice": LOG_NOTICE, 756 "panic": LOG_EMERG, # DEPRECATED 757 "warn": LOG_WARNING, # DEPRECATED 758 "warning": LOG_WARNING, 759 } 760 761 facility_names = { 762 "auth": LOG_AUTH, 763 "authpriv": LOG_AUTHPRIV, 764 "cron": LOG_CRON, 765 "daemon": LOG_DAEMON, 766 "ftp": LOG_FTP, 767 "kern": LOG_KERN, 768 "lpr": LOG_LPR, 769 "mail": LOG_MAIL, 770 "news": LOG_NEWS, 771 "security": LOG_AUTH, # DEPRECATED 772 "syslog": LOG_SYSLOG, 773 "user": LOG_USER, 774 "uucp": LOG_UUCP, 775 "local0": LOG_LOCAL0, 776 "local1": LOG_LOCAL1, 777 "local2": LOG_LOCAL2, 778 "local3": LOG_LOCAL3, 779 "local4": LOG_LOCAL4, 780 "local5": LOG_LOCAL5, 781 "local6": LOG_LOCAL6, 782 "local7": LOG_LOCAL7, 783 } 784 785 #The map below appears to be trivially lowercasing the key. However, 786 #there's more to it than meets the eye - in some locales, lowercasing 787 #gives unexpected results. See SF #1524081: in the Turkish locale, 788 #"INFO".lower() != "info" 789 priority_map = { 790 "DEBUG" : "debug", 791 "INFO" : "info", 792 "WARNING" : "warning", 793 "ERROR" : "error", 794 "CRITICAL" : "critical" 795 } 796 797 def __init__(self, address=('localhost', SYSLOG_UDP_PORT), 798 facility=LOG_USER, socktype=None): 799 """ 800 Initialize a handler. 801 802 If address is specified as a string, a UNIX socket is used. To log to a 803 local syslogd, "SysLogHandler(address="/dev/log")" can be used. 804 If facility is not specified, LOG_USER is used. If socktype is 805 specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific 806 socket type will be used. For Unix sockets, you can also specify a 807 socktype of None, in which case socket.SOCK_DGRAM will be used, falling 808 back to socket.SOCK_STREAM. 809 """ 810 logging.Handler.__init__(self) 811 812 self.address = address 813 self.facility = facility 814 self.socktype = socktype 815 816 if isinstance(address, str): 817 self.unixsocket = True 818 # Syslog server may be unavailable during handler initialisation. 819 # C's openlog() function also ignores connection errors. 820 # Moreover, we ignore these errors while logging, so it not worse 821 # to ignore it also here. 822 try: 823 self._connect_unixsocket(address) 824 except OSError: 825 pass 826 else: 827 self.unixsocket = False 828 if socktype is None: 829 socktype = socket.SOCK_DGRAM 830 host, port = address 831 ress = socket.getaddrinfo(host, port, 0, socktype) 832 if not ress: 833 raise OSError("getaddrinfo returns an empty list") 834 for res in ress: 835 af, socktype, proto, _, sa = res 836 err = sock = None 837 try: 838 sock = socket.socket(af, socktype, proto) 839 if socktype == socket.SOCK_STREAM: 840 sock.connect(sa) 841 break 842 except OSError as exc: 843 err = exc 844 if sock is not None: 845 sock.close() 846 if err is not None: 847 raise err 848 self.socket = sock 849 self.socktype = socktype 850 851 def _connect_unixsocket(self, address): 852 use_socktype = self.socktype 853 if use_socktype is None: 854 use_socktype = socket.SOCK_DGRAM 855 self.socket = socket.socket(socket.AF_UNIX, use_socktype) 856 try: 857 self.socket.connect(address) 858 # it worked, so set self.socktype to the used type 859 self.socktype = use_socktype 860 except OSError: 861 self.socket.close() 862 if self.socktype is not None: 863 # user didn't specify falling back, so fail 864 raise 865 use_socktype = socket.SOCK_STREAM 866 self.socket = socket.socket(socket.AF_UNIX, use_socktype) 867 try: 868 self.socket.connect(address) 869 # it worked, so set self.socktype to the used type 870 self.socktype = use_socktype 871 except OSError: 872 self.socket.close() 873 raise 874 875 def encodePriority(self, facility, priority): 876 """ 877 Encode the facility and priority. You can pass in strings or 878 integers - if strings are passed, the facility_names and 879 priority_names mapping dictionaries are used to convert them to 880 integers. 881 """ 882 if isinstance(facility, str): 883 facility = self.facility_names[facility] 884 if isinstance(priority, str): 885 priority = self.priority_names[priority] 886 return (facility << 3) | priority 887 888 def close(self): 889 """ 890 Closes the socket. 891 """ 892 self.acquire() 893 try: 894 self.socket.close() 895 logging.Handler.close(self) 896 finally: 897 self.release() 898 899 def mapPriority(self, levelName): 900 """ 901 Map a logging level name to a key in the priority_names map. 902 This is useful in two scenarios: when custom levels are being 903 used, and in the case where you can't do a straightforward 904 mapping by lowercasing the logging level name because of locale- 905 specific issues (see SF #1524081). 906 """ 907 return self.priority_map.get(levelName, "warning") 908 909 ident = '' # prepended to all messages 910 append_nul = True # some old syslog daemons expect a NUL terminator 911 912 def emit(self, record): 913 """ 914 Emit a record. 915 916 The record is formatted, and then sent to the syslog server. If 917 exception information is present, it is NOT sent to the server. 918 """ 919 try: 920 msg = self.format(record) 921 if self.ident: 922 msg = self.ident + msg 923 if self.append_nul: 924 msg += '\000' 925 926 # We need to convert record level to lowercase, maybe this will 927 # change in the future. 928 prio = '<%d>' % self.encodePriority(self.facility, 929 self.mapPriority(record.levelname)) 930 prio = prio.encode('utf-8') 931 # Message is a string. Convert to bytes as required by RFC 5424 932 msg = msg.encode('utf-8') 933 msg = prio + msg 934 if self.unixsocket: 935 try: 936 self.socket.send(msg) 937 except OSError: 938 self.socket.close() 939 self._connect_unixsocket(self.address) 940 self.socket.send(msg) 941 elif self.socktype == socket.SOCK_DGRAM: 942 self.socket.sendto(msg, self.address) 943 else: 944 self.socket.sendall(msg) 945 except Exception: 946 self.handleError(record) 947 948 class SMTPHandler(logging.Handler): 949 """ 950 A handler class which sends an SMTP email for each logging event. 951 """ 952 def __init__(self, mailhost, fromaddr, toaddrs, subject, 953 credentials=None, secure=None, timeout=5.0): 954 """ 955 Initialize the handler. 956 957 Initialize the instance with the from and to addresses and subject 958 line of the email. To specify a non-standard SMTP port, use the 959 (host, port) tuple format for the mailhost argument. To specify 960 authentication credentials, supply a (username, password) tuple 961 for the credentials argument. To specify the use of a secure 962 protocol (TLS), pass in a tuple for the secure argument. This will 963 only be used when authentication credentials are supplied. The tuple 964 will be either an empty tuple, or a single-value tuple with the name 965 of a keyfile, or a 2-value tuple with the names of the keyfile and 966 certificate file. (This tuple is passed to the `starttls` method). 967 A timeout in seconds can be specified for the SMTP connection (the 968 default is one second). 969 """ 970 logging.Handler.__init__(self) 971 if isinstance(mailhost, (list, tuple)): 972 self.mailhost, self.mailport = mailhost 973 else: 974 self.mailhost, self.mailport = mailhost, None 975 if isinstance(credentials, (list, tuple)): 976 self.username, self.password = credentials 977 else: 978 self.username = None 979 self.fromaddr = fromaddr 980 if isinstance(toaddrs, str): 981 toaddrs = [toaddrs] 982 self.toaddrs = toaddrs 983 self.subject = subject 984 self.secure = secure 985 self.timeout = timeout 986 987 def getSubject(self, record): 988 """ 989 Determine the subject for the email. 990 991 If you want to specify a subject line which is record-dependent, 992 override this method. 993 """ 994 return self.subject 995 996 def emit(self, record): 997 """ 998 Emit a record. 999 1000 Format the record and send it to the specified addressees. 1001 """ 1002 try: 1003 import smtplib 1004 from email.message import EmailMessage 1005 import email.utils 1006 1007 port = self.mailport 1008 if not port: 1009 port = smtplib.SMTP_PORT 1010 smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout) 1011 msg = EmailMessage() 1012 msg['From'] = self.fromaddr 1013 msg['To'] = ','.join(self.toaddrs) 1014 msg['Subject'] = self.getSubject(record) 1015 msg['Date'] = email.utils.localtime() 1016 msg.set_content(self.format(record)) 1017 if self.username: 1018 if self.secure is not None: 1019 smtp.ehlo() 1020 smtp.starttls(*self.secure) 1021 smtp.ehlo() 1022 smtp.login(self.username, self.password) 1023 smtp.send_message(msg) 1024 smtp.quit() 1025 except Exception: 1026 self.handleError(record) 1027 1028 class NTEventLogHandler(logging.Handler): 1029 """ 1030 A handler class which sends events to the NT Event Log. Adds a 1031 registry entry for the specified application name. If no dllname is 1032 provided, win32service.pyd (which contains some basic message 1033 placeholders) is used. Note that use of these placeholders will make 1034 your event logs big, as the entire message source is held in the log. 1035 If you want slimmer logs, you have to pass in the name of your own DLL 1036 which contains the message definitions you want to use in the event log. 1037 """ 1038 def __init__(self, appname, dllname=None, logtype="Application"): 1039 logging.Handler.__init__(self) 1040 try: 1041 import win32evtlogutil, win32evtlog 1042 self.appname = appname 1043 self._welu = win32evtlogutil 1044 if not dllname: 1045 dllname = os.path.split(self._welu.__file__) 1046 dllname = os.path.split(dllname[0]) 1047 dllname = os.path.join(dllname[0], r'win32service.pyd') 1048 self.dllname = dllname 1049 self.logtype = logtype 1050 self._welu.AddSourceToRegistry(appname, dllname, logtype) 1051 self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE 1052 self.typemap = { 1053 logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, 1054 logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, 1055 logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, 1056 logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, 1057 logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, 1058 } 1059 except ImportError: 1060 print("The Python Win32 extensions for NT (service, event "\ 1061 "logging) appear not to be available.") 1062 self._welu = None 1063 1064 def getMessageID(self, record): 1065 """ 1066 Return the message ID for the event record. If you are using your 1067 own messages, you could do this by having the msg passed to the 1068 logger being an ID rather than a formatting string. Then, in here, 1069 you could use a dictionary lookup to get the message ID. This 1070 version returns 1, which is the base message ID in win32service.pyd. 1071 """ 1072 return 1 1073 1074 def getEventCategory(self, record): 1075 """ 1076 Return the event category for the record. 1077 1078 Override this if you want to specify your own categories. This version 1079 returns 0. 1080 """ 1081 return 0 1082 1083 def getEventType(self, record): 1084 """ 1085 Return the event type for the record. 1086 1087 Override this if you want to specify your own types. This version does 1088 a mapping using the handler's typemap attribute, which is set up in 1089 __init__() to a dictionary which contains mappings for DEBUG, INFO, 1090 WARNING, ERROR and CRITICAL. If you are using your own levels you will 1091 either need to override this method or place a suitable dictionary in 1092 the handler's typemap attribute. 1093 """ 1094 return self.typemap.get(record.levelno, self.deftype) 1095 1096 def emit(self, record): 1097 """ 1098 Emit a record. 1099 1100 Determine the message ID, event category and event type. Then 1101 log the message in the NT event log. 1102 """ 1103 if self._welu: 1104 try: 1105 id = self.getMessageID(record) 1106 cat = self.getEventCategory(record) 1107 type = self.getEventType(record) 1108 msg = self.format(record) 1109 self._welu.ReportEvent(self.appname, id, cat, type, [msg]) 1110 except Exception: 1111 self.handleError(record) 1112 1113 def close(self): 1114 """ 1115 Clean up this handler. 1116 1117 You can remove the application name from the registry as a 1118 source of event log entries. However, if you do this, you will 1119 not be able to see the events as you intended in the Event Log 1120 Viewer - it needs to be able to access the registry to get the 1121 DLL name. 1122 """ 1123 #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) 1124 logging.Handler.close(self) 1125 1126 class HTTPHandler(logging.Handler): 1127 """ 1128 A class which sends records to a Web server, using either GET or 1129 POST semantics. 1130 """ 1131 def __init__(self, host, url, method="GET", secure=False, credentials=None, 1132 context=None): 1133 """ 1134 Initialize the instance with the host, the request URL, and the method 1135 ("GET" or "POST") 1136 """ 1137 logging.Handler.__init__(self) 1138 method = method.upper() 1139 if method not in ["GET", "POST"]: 1140 raise ValueError("method must be GET or POST") 1141 if not secure and context is not None: 1142 raise ValueError("context parameter only makes sense " 1143 "with secure=True") 1144 self.host = host 1145 self.url = url 1146 self.method = method 1147 self.secure = secure 1148 self.credentials = credentials 1149 self.context = context 1150 1151 def mapLogRecord(self, record): 1152 """ 1153 Default implementation of mapping the log record into a dict 1154 that is sent as the CGI data. Overwrite in your class. 1155 Contributed by Franz Glasner. 1156 """ 1157 return record.__dict__ 1158 1159 def emit(self, record): 1160 """ 1161 Emit a record. 1162 1163 Send the record to the Web server as a percent-encoded dictionary 1164 """ 1165 try: 1166 import http.client, urllib.parse 1167 host = self.host 1168 if self.secure: 1169 h = http.client.HTTPSConnection(host, context=self.context) 1170 else: 1171 h = http.client.HTTPConnection(host) 1172 url = self.url 1173 data = urllib.parse.urlencode(self.mapLogRecord(record)) 1174 if self.method == "GET": 1175 if (url.find('?') >= 0): 1176 sep = '&' 1177 else: 1178 sep = '?' 1179 url = url + "%c%s" % (sep, data) 1180 h.putrequest(self.method, url) 1181 # support multiple hosts on one IP address... 1182 # need to strip optional :port from host, if present 1183 i = host.find(":") 1184 if i >= 0: 1185 host = host[:i] 1186 # See issue #30904: putrequest call above already adds this header 1187 # on Python 3.x. 1188 # h.putheader("Host", host) 1189 if self.method == "POST": 1190 h.putheader("Content-type", 1191 "application/x-www-form-urlencoded") 1192 h.putheader("Content-length", str(len(data))) 1193 if self.credentials: 1194 import base64 1195 s = ('%s:%s' % self.credentials).encode('utf-8') 1196 s = 'Basic ' + base64.b64encode(s).strip().decode('ascii') 1197 h.putheader('Authorization', s) 1198 h.endheaders() 1199 if self.method == "POST": 1200 h.send(data.encode('utf-8')) 1201 h.getresponse() #can't do anything with the result 1202 except Exception: 1203 self.handleError(record) 1204 1205 class BufferingHandler(logging.Handler): 1206 """ 1207 A handler class which buffers logging records in memory. Whenever each 1208 record is added to the buffer, a check is made to see if the buffer should 1209 be flushed. If it should, then flush() is expected to do what's needed. 1210 """ 1211 def __init__(self, capacity): 1212 """ 1213 Initialize the handler with the buffer size. 1214 """ 1215 logging.Handler.__init__(self) 1216 self.capacity = capacity 1217 self.buffer = [] 1218 1219 def shouldFlush(self, record): 1220 """ 1221 Should the handler flush its buffer? 1222 1223 Returns true if the buffer is up to capacity. This method can be 1224 overridden to implement custom flushing strategies. 1225 """ 1226 return (len(self.buffer) >= self.capacity) 1227 1228 def emit(self, record): 1229 """ 1230 Emit a record. 1231 1232 Append the record. If shouldFlush() tells us to, call flush() to process 1233 the buffer. 1234 """ 1235 self.buffer.append(record) 1236 if self.shouldFlush(record): 1237 self.flush() 1238 1239 def flush(self): 1240 """ 1241 Override to implement custom flushing behaviour. 1242 1243 This version just zaps the buffer to empty. 1244 """ 1245 self.acquire() 1246 try: 1247 self.buffer = [] 1248 finally: 1249 self.release() 1250 1251 def close(self): 1252 """ 1253 Close the handler. 1254 1255 This version just flushes and chains to the parent class' close(). 1256 """ 1257 try: 1258 self.flush() 1259 finally: 1260 logging.Handler.close(self) 1261 1262 class MemoryHandler(BufferingHandler): 1263 """ 1264 A handler class which buffers logging records in memory, periodically 1265 flushing them to a target handler. Flushing occurs whenever the buffer 1266 is full, or when an event of a certain severity or greater is seen. 1267 """ 1268 def __init__(self, capacity, flushLevel=logging.ERROR, target=None, 1269 flushOnClose=True): 1270 """ 1271 Initialize the handler with the buffer size, the level at which 1272 flushing should occur and an optional target. 1273 1274 Note that without a target being set either here or via setTarget(), 1275 a MemoryHandler is no use to anyone! 1276 1277 The ``flushOnClose`` argument is ``True`` for backward compatibility 1278 reasons - the old behaviour is that when the handler is closed, the 1279 buffer is flushed, even if the flush level hasn't been exceeded nor the 1280 capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. 1281 """ 1282 BufferingHandler.__init__(self, capacity) 1283 self.flushLevel = flushLevel 1284 self.target = target 1285 # See Issue #26559 for why this has been added 1286 self.flushOnClose = flushOnClose 1287 1288 def shouldFlush(self, record): 1289 """ 1290 Check for buffer full or a record at the flushLevel or higher. 1291 """ 1292 return (len(self.buffer) >= self.capacity) or \ 1293 (record.levelno >= self.flushLevel) 1294 1295 def setTarget(self, target): 1296 """ 1297 Set the target handler for this handler. 1298 """ 1299 self.target = target 1300 1301 def flush(self): 1302 """ 1303 For a MemoryHandler, flushing means just sending the buffered 1304 records to the target, if there is one. Override if you want 1305 different behaviour. 1306 1307 The record buffer is also cleared by this operation. 1308 """ 1309 self.acquire() 1310 try: 1311 if self.target: 1312 for record in self.buffer: 1313 self.target.handle(record) 1314 self.buffer = [] 1315 finally: 1316 self.release() 1317 1318 def close(self): 1319 """ 1320 Flush, if appropriately configured, set the target to None and lose the 1321 buffer. 1322 """ 1323 try: 1324 if self.flushOnClose: 1325 self.flush() 1326 finally: 1327 self.acquire() 1328 try: 1329 self.target = None 1330 BufferingHandler.close(self) 1331 finally: 1332 self.release() 1333 1334 1335 class QueueHandler(logging.Handler): 1336 """ 1337 This handler sends events to a queue. Typically, it would be used together 1338 with a multiprocessing Queue to centralise logging to file in one process 1339 (in a multi-process application), so as to avoid file write contention 1340 between processes. 1341 1342 This code is new in Python 3.2, but this class can be copy pasted into 1343 user code for use with earlier Python versions. 1344 """ 1345 1346 def __init__(self, queue): 1347 """ 1348 Initialise an instance, using the passed queue. 1349 """ 1350 logging.Handler.__init__(self) 1351 self.queue = queue 1352 1353 def enqueue(self, record): 1354 """ 1355 Enqueue a record. 1356 1357 The base implementation uses put_nowait. You may want to override 1358 this method if you want to use blocking, timeouts or custom queue 1359 implementations. 1360 """ 1361 self.queue.put_nowait(record) 1362 1363 def prepare(self, record): 1364 """ 1365 Prepares a record for queuing. The object returned by this method is 1366 enqueued. 1367 1368 The base implementation formats the record to merge the message 1369 and arguments, and removes unpickleable items from the record 1370 in-place. 1371 1372 You might want to override this method if you want to convert 1373 the record to a dict or JSON string, or send a modified copy 1374 of the record while leaving the original intact. 1375 """ 1376 # The format operation gets traceback text into record.exc_text 1377 # (if there's exception data), and also puts the message into 1378 # record.message. We can then use this to replace the original 1379 # msg + args, as these might be unpickleable. We also zap the 1380 # exc_info attribute, as it's no longer needed and, if not None, 1381 # will typically not be pickleable. 1382 self.format(record) 1383 record.msg = record.message 1384 record.args = None 1385 record.exc_info = None 1386 return record 1387 1388 def emit(self, record): 1389 """ 1390 Emit a record. 1391 1392 Writes the LogRecord to the queue, preparing it for pickling first. 1393 """ 1394 try: 1395 self.enqueue(self.prepare(record)) 1396 except Exception: 1397 self.handleError(record) 1398 1399 if threading: 1400 class QueueListener(object): 1401 """ 1402 This class implements an internal threaded listener which watches for 1403 LogRecords being added to a queue, removes them and passes them to a 1404 list of handlers for processing. 1405 """ 1406 _sentinel = None 1407 1408 def __init__(self, queue, *handlers, respect_handler_level=False): 1409 """ 1410 Initialise an instance with the specified queue and 1411 handlers. 1412 """ 1413 self.queue = queue 1414 self.handlers = handlers 1415 self._thread = None 1416 self.respect_handler_level = respect_handler_level 1417 1418 def dequeue(self, block): 1419 """ 1420 Dequeue a record and return it, optionally blocking. 1421 1422 The base implementation uses get. You may want to override this method 1423 if you want to use timeouts or work with custom queue implementations. 1424 """ 1425 return self.queue.get(block) 1426 1427 def start(self): 1428 """ 1429 Start the listener. 1430 1431 This starts up a background thread to monitor the queue for 1432 LogRecords to process. 1433 """ 1434 self._thread = t = threading.Thread(target=self._monitor) 1435 t.daemon = True 1436 t.start() 1437 1438 def prepare(self , record): 1439 """ 1440 Prepare a record for handling. 1441 1442 This method just returns the passed-in record. You may want to 1443 override this method if you need to do any custom marshalling or 1444 manipulation of the record before passing it to the handlers. 1445 """ 1446 return record 1447 1448 def handle(self, record): 1449 """ 1450 Handle a record. 1451 1452 This just loops through the handlers offering them the record 1453 to handle. 1454 """ 1455 record = self.prepare(record) 1456 for handler in self.handlers: 1457 if not self.respect_handler_level: 1458 process = True 1459 else: 1460 process = record.levelno >= handler.level 1461 if process: 1462 handler.handle(record) 1463 1464 def _monitor(self): 1465 """ 1466 Monitor the queue for records, and ask the handler 1467 to deal with them. 1468 1469 This method runs on a separate, internal thread. 1470 The thread will terminate if it sees a sentinel object in the queue. 1471 """ 1472 q = self.queue 1473 has_task_done = hasattr(q, 'task_done') 1474 while True: 1475 try: 1476 record = self.dequeue(True) 1477 if record is self._sentinel: 1478 break 1479 self.handle(record) 1480 if has_task_done: 1481 q.task_done() 1482 except queue.Empty: 1483 break 1484 1485 def enqueue_sentinel(self): 1486 """ 1487 This is used to enqueue the sentinel record. 1488 1489 The base implementation uses put_nowait. You may want to override this 1490 method if you want to use timeouts or work with custom queue 1491 implementations. 1492 """ 1493 self.queue.put_nowait(self._sentinel) 1494 1495 def stop(self): 1496 """ 1497 Stop the listener. 1498 1499 This asks the thread to terminate, and then waits for it to do so. 1500 Note that if you don't call this before your application exits, there 1501 may be some records still left on the queue, which won't be processed. 1502 """ 1503 self.enqueue_sentinel() 1504 self._thread.join() 1505 self._thread = None
每天更新一点点,温习一点点点,进步一点点