pytest
pytest: unit and functional testing with Python.
# PYTHON_ARGCOMPLETE_OK
"""pytest: unit and functional testing with Python."""

from __future__ import annotations

# Public re-exports: everything in pytest's public API lives in the private
# ``_pytest`` package and is surfaced here under its stable public name.
from _pytest import __version__
from _pytest import version_tuple
from _pytest._code import ExceptionInfo
from _pytest.assertion import register_assert_rewrite
from _pytest.cacheprovider import Cache
from _pytest.capture import CaptureFixture
from _pytest.config import cmdline
from _pytest.config import Config
from _pytest.config import console_main
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.config import hookspec
from _pytest.config import main
from _pytest.config import PytestPluginManager
from _pytest.config import UsageError
from _pytest.config.argparsing import OptionGroup
from _pytest.config.argparsing import Parser
from _pytest.debugging import pytestPDB as __pytestPDB
from _pytest.doctest import DoctestItem
from _pytest.fixtures import fixture
from _pytest.fixtures import FixtureDef
from _pytest.fixtures import FixtureLookupError
from _pytest.fixtures import FixtureRequest
from _pytest.fixtures import yield_fixture
from _pytest.freeze_support import freeze_includes
from _pytest.legacypath import TempdirFactory
from _pytest.legacypath import Testdir
from _pytest.logging import LogCaptureFixture
from _pytest.main import Dir
from _pytest.main import Session
from _pytest.mark import HIDDEN_PARAM
from _pytest.mark import Mark
from _pytest.mark import MARK_GEN as mark
from _pytest.mark import MarkDecorator
from _pytest.mark import MarkGenerator
from _pytest.mark import param
from _pytest.monkeypatch import MonkeyPatch
from _pytest.nodes import Collector
from _pytest.nodes import Directory
from _pytest.nodes import File
from _pytest.nodes import Item
from _pytest.outcomes import exit
from _pytest.outcomes import fail
from _pytest.outcomes import importorskip
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.pytester import HookRecorder
from _pytest.pytester import LineMatcher
from _pytest.pytester import Pytester
from _pytest.pytester import RecordedHookCall
from _pytest.pytester import RunResult
from _pytest.python import Class
from _pytest.python import Function
from _pytest.python import Metafunc
from _pytest.python import Module
from _pytest.python import Package
from _pytest.python_api import approx
from _pytest.raises import raises
from _pytest.raises import RaisesExc
from _pytest.raises import RaisesGroup
from _pytest.recwarn import deprecated_call
from _pytest.recwarn import WarningsRecorder
from _pytest.recwarn import warns
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
from _pytest.runner import CallInfo
from _pytest.stash import Stash
from _pytest.stash import StashKey
from _pytest.subtests import SubtestReport
from _pytest.subtests import Subtests
from _pytest.terminal import TerminalReporter
from _pytest.terminal import TestShortLogReport
from _pytest.tmpdir import TempPathFactory
from _pytest.warning_types import PytestAssertRewriteWarning
from _pytest.warning_types import PytestCacheWarning
from _pytest.warning_types import PytestCollectionWarning
from _pytest.warning_types import PytestConfigWarning
from _pytest.warning_types import PytestDeprecationWarning
from _pytest.warning_types import PytestExperimentalApiWarning
from _pytest.warning_types import PytestFDWarning
from _pytest.warning_types import PytestRemovedIn9Warning
from _pytest.warning_types import PytestRemovedIn10Warning
from _pytest.warning_types import PytestReturnNotNoneWarning
from _pytest.warning_types import PytestUnhandledThreadExceptionWarning
from _pytest.warning_types import PytestUnknownMarkWarning
from _pytest.warning_types import PytestUnraisableExceptionWarning
from _pytest.warning_types import PytestWarning


# Expose the pytest-aware PDB entry point (imported above under a mangled
# private alias) as ``pytest.set_trace``.
set_trace = __pytestPDB.set_trace


# The explicit public API. Names are listed roughly alphabetically
# (capitalized names first, then dunders, then lowercase).
__all__ = [
    "HIDDEN_PARAM",
    "Cache",
    "CallInfo",
    "CaptureFixture",
    "Class",
    "CollectReport",
    "Collector",
    "Config",
    "Dir",
    "Directory",
    "DoctestItem",
    "ExceptionInfo",
    "ExitCode",
    "File",
    "FixtureDef",
    "FixtureLookupError",
    "FixtureRequest",
    "Function",
    "HookRecorder",
    "Item",
    "LineMatcher",
    "LogCaptureFixture",
    "Mark",
    "MarkDecorator",
    "MarkGenerator",
    "Metafunc",
    "Module",
    "MonkeyPatch",
    "OptionGroup",
    "Package",
    "Parser",
    "PytestAssertRewriteWarning",
    "PytestCacheWarning",
    "PytestCollectionWarning",
    "PytestConfigWarning",
    "PytestDeprecationWarning",
    "PytestExperimentalApiWarning",
    "PytestFDWarning",
    "PytestPluginManager",
    "PytestRemovedIn9Warning",
    "PytestRemovedIn10Warning",
    "PytestReturnNotNoneWarning",
    "PytestUnhandledThreadExceptionWarning",
    "PytestUnknownMarkWarning",
    "PytestUnraisableExceptionWarning",
    "PytestWarning",
    "Pytester",
    "RaisesExc",
    "RaisesGroup",
    "RecordedHookCall",
    "RunResult",
    "Session",
    "Stash",
    "StashKey",
    "SubtestReport",
    "Subtests",
    "TempPathFactory",
    "TempdirFactory",
    "TerminalReporter",
    "TestReport",
    "TestShortLogReport",
    "Testdir",
    "UsageError",
    "WarningsRecorder",
    "__version__",
    "approx",
    "cmdline",
    "console_main",
    "deprecated_call",
    "exit",
    "fail",
    "fixture",
    "freeze_includes",
    "hookimpl",
    "hookspec",
    "importorskip",
    "main",
    "mark",
    "param",
    "raises",
    "register_assert_rewrite",
    "set_trace",
    "skip",
    "version_tuple",
    "warns",
    "xfail",
    "yield_fixture",
]
@final
@dataclasses.dataclass
class Cache:
    """Instance of the `cache` fixture.

    Persists JSON-serializable values and scratch directories under the
    configured ``cache_dir`` across test sessions.
    """

    # Both fields are internal; repr=False keeps them out of the dataclass repr.
    _cachedir: Path = dataclasses.field(repr=False)
    _config: Config = dataclasses.field(repr=False)

    # Sub-directory under cache-dir for directories created by `mkdir()`.
    _CACHE_PREFIX_DIRS = "d"

    # Sub-directory under cache-dir for values created by `set()`.
    _CACHE_PREFIX_VALUES = "v"

    def __init__(
        self, cachedir: Path, config: Config, *, _ispytest: bool = False
    ) -> None:
        # Private constructor: direct instantiation is guarded by check_ispytest.
        check_ispytest(_ispytest)
        self._cachedir = cachedir
        self._config = config

    @classmethod
    def for_config(cls, config: Config, *, _ispytest: bool = False) -> Cache:
        """Create the Cache instance for a Config.

        Honors the ``--cache-clear`` option by wiping the cache sub-dirs first.

        :meta private:
        """
        check_ispytest(_ispytest)
        cachedir = cls.cache_dir_from_config(config, _ispytest=True)
        if config.getoption("cacheclear") and cachedir.is_dir():
            cls.clear_cache(cachedir, _ispytest=True)
        return cls(cachedir, config, _ispytest=True)

    @classmethod
    def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None:
        """Clear the sub-directories used to hold cached directories and values.

        :meta private:
        """
        check_ispytest(_ispytest)
        # Only the "d" and "v" sub-dirs are removed; supporting files
        # (README.md, .gitignore, CACHEDIR.TAG) are kept.
        for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES):
            d = cachedir / prefix
            if d.is_dir():
                rm_rf(d)

    @staticmethod
    def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path:
        """Get the path to the cache directory for a Config.

        :meta private:
        """
        check_ispytest(_ispytest)
        # ``cache_dir`` ini value may be relative; resolve against rootpath.
        return resolve_from_str(config.getini("cache_dir"), config.rootpath)

    def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None:
        """Issue a cache warning.

        :param fmt: Message, optionally a ``str.format`` template for ``args``.

        :meta private:
        """
        check_ispytest(_ispytest)
        import warnings

        from _pytest.warning_types import PytestCacheWarning

        # NOTE(review): the second positional argument of warnings.warn() is
        # `category`, which is documented to be ignored when the message is
        # already a Warning instance (as it is here) — passing
        # ``self._config.hook`` looks accidental; confirm upstream intent.
        warnings.warn(
            PytestCacheWarning(fmt.format(**args) if args else fmt),
            self._config.hook,
            stacklevel=3,
        )

    def _mkdir(self, path: Path) -> None:
        # Lazily materialize the cache dir (with its supporting files) before
        # creating anything inside it.
        self._ensure_cache_dir_and_supporting_files()
        path.mkdir(exist_ok=True, parents=True)

    def mkdir(self, name: str) -> Path:
        """Return a directory path object with the given name.

        If the directory does not yet exist, it will be created. You can use
        it to manage files to e.g. store/retrieve database dumps across test
        sessions.

        .. versionadded:: 7.0

        :param name:
            Must be a string not containing a ``/`` separator.
            Make sure the name contains your plugin or application
            identifiers to prevent clashes with other cache users.
        """
        path = Path(name)
        if len(path.parts) > 1:
            raise ValueError("name is not allowed to contain path separators")
        res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path)
        self._mkdir(res)
        return res

    def _getvaluepath(self, key: str) -> Path:
        # Keys map directly to files under the "v" sub-directory; ``/`` in the
        # key therefore produces nested directories.
        return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key))

    def get(self, key: str, default):
        """Return the cached value for the given key.

        If no value was yet cached or the value cannot be read, the specified
        default is returned.

        :param key:
            Must be a ``/`` separated value. Usually the first
            name is the name of your plugin or your application.
        :param default:
            The value to return in case of a cache-miss or invalid cache value.
        """
        path = self._getvaluepath(key)
        try:
            with path.open("r", encoding="UTF-8") as f:
                return json.load(f)
        # ValueError covers malformed JSON; OSError covers a missing/unreadable file.
        except (ValueError, OSError):
            return default

    def set(self, key: str, value: object) -> None:
        """Save value for the given key.

        :param key:
            Must be a ``/`` separated value. Usually the first
            name is the name of your plugin or your application.
        :param value:
            Must be of any combination of basic python types,
            including nested types like lists of dictionaries.
        """
        path = self._getvaluepath(key)
        try:
            self._mkdir(path.parent)
        except OSError as exc:
            # Best-effort: a cache that can't be written is reported as a
            # warning, never a test failure.
            self.warn(
                f"could not create cache path {path}: {exc}",
                _ispytest=True,
            )
            return
        data = json.dumps(value, ensure_ascii=False, indent=2)
        try:
            f = path.open("w", encoding="UTF-8")
        except OSError as exc:
            self.warn(
                f"cache could not write path {path}: {exc}",
                _ispytest=True,
            )
        else:
            with f:
                f.write(data)

    def _ensure_cache_dir_and_supporting_files(self) -> None:
        """Create the cache dir and its supporting files."""
        if self._cachedir.is_dir():
            return

        # Build the directory in a temp location and rename it into place so
        # concurrent pytest runs never observe a half-initialized cache dir.
        self._cachedir.parent.mkdir(parents=True, exist_ok=True)
        with tempfile.TemporaryDirectory(
            prefix="pytest-cache-files-",
            dir=self._cachedir.parent,
        ) as newpath:
            path = Path(newpath)

            # Reset permissions to the default, see #12308.
            # Note: there's no way to get the current umask atomically, eek.
            umask = os.umask(0o022)
            os.umask(umask)
            path.chmod(0o777 - umask)

            with open(path.joinpath("README.md"), "x", encoding="UTF-8") as f:
                f.write(README_CONTENT)
            with open(path.joinpath(".gitignore"), "x", encoding="UTF-8") as f:
                f.write("# Created by pytest automatically.\n*\n")
            with open(path.joinpath("CACHEDIR.TAG"), "xb") as f:
                f.write(CACHEDIR_TAG_CONTENT)

            try:
                path.rename(self._cachedir)
            except OSError as e:
                # If 2 concurrent pytests both race to the rename, the loser
                # gets "Directory not empty" from the rename. In this case,
                # everything is handled so just continue (while letting the
                # temporary directory be cleaned up).
                # On Windows, the error is a FileExistsError which translates to EEXIST.
                if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
                    raise
            else:
                # Create a directory in place of the one we just moved so that
                # `TemporaryDirectory`'s cleanup doesn't complain.
                #
                # TODO: pass ignore_cleanup_errors=True when we no longer support python < 3.10.
                # See https://github.com/python/cpython/issues/74168. Note that passing
                # delete=False would do the wrong thing in case of errors and isn't supported
                # until python 3.12.
                path.mkdir()
Instance of the ``cache`` fixture.
@classmethod
def for_config(cls, config: Config, *, _ispytest: bool = False) -> Cache:
    """Create the Cache instance for a Config.

    :meta private:
    """
    check_ispytest(_ispytest)
    cachedir = cls.cache_dir_from_config(config, _ispytest=True)
    # ``--cache-clear`` wipes the cached dirs/values before use.
    if config.getoption("cacheclear") and cachedir.is_dir():
        cls.clear_cache(cachedir, _ispytest=True)
    return cls(cachedir, config, _ispytest=True)
Create the Cache instance for a Config.
:meta private:
@classmethod
def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None:
    """Clear the sub-directories used to hold cached directories and values.

    :meta private:
    """
    check_ispytest(_ispytest)
    # Remove only the "d" and "v" sub-dirs, keeping the supporting files.
    for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES):
        d = cachedir / prefix
        if d.is_dir():
            rm_rf(d)
Clear the sub-directories used to hold cached directories and values.
:meta private:
@staticmethod
def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path:
    """Get the path to the cache directory for a Config.

    :meta private:
    """
    check_ispytest(_ispytest)
    # ``cache_dir`` ini value may be relative; resolve against rootpath.
    return resolve_from_str(config.getini("cache_dir"), config.rootpath)
Get the path to the cache directory for a Config.
:meta private:
def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None:
    """Issue a cache warning.

    :param fmt: Message, optionally a ``str.format`` template for ``args``.

    :meta private:
    """
    check_ispytest(_ispytest)
    import warnings

    from _pytest.warning_types import PytestCacheWarning

    # NOTE(review): the second positional argument of warnings.warn() is
    # `category`, which is ignored when the message is already a Warning
    # instance (as it is here); ``self._config.hook`` looks accidental —
    # confirm upstream intent.
    warnings.warn(
        PytestCacheWarning(fmt.format(**args) if args else fmt),
        self._config.hook,
        stacklevel=3,
    )
Issue a cache warning.
:meta private:
def mkdir(self, name: str) -> Path:
    """Return a directory path object with the given name.

    If the directory does not yet exist, it will be created. You can use
    it to manage files to e.g. store/retrieve database dumps across test
    sessions.

    .. versionadded:: 7.0

    :param name:
        Must be a string not containing a ``/`` separator.
        Make sure the name contains your plugin or application
        identifiers to prevent clashes with other cache users.
    :raises ValueError: If ``name`` contains path separators.
    """
    path = Path(name)
    # A multi-part path means the name contained a separator.
    if len(path.parts) > 1:
        raise ValueError("name is not allowed to contain path separators")
    res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path)
    self._mkdir(res)
    return res
Return a directory path object with the given name.
If the directory does not yet exist, it will be created. You can use it to manage files to e.g. store/retrieve database dumps across test sessions.
New in version 7.0.
Parameters
- name: Must be a string not containing a ``/`` separator. Make sure the name
  contains your plugin or application identifiers to prevent clashes with
  other cache users.
def get(self, key: str, default):
    """Return the cached value for the given key.

    If no value was yet cached or the value cannot be read, the specified
    default is returned.

    :param key:
        Must be a ``/`` separated value. Usually the first
        name is the name of your plugin or your application.
    :param default:
        The value to return in case of a cache-miss or invalid cache value.
    """
    path = self._getvaluepath(key)
    try:
        with path.open("r", encoding="UTF-8") as f:
            return json.load(f)
    # ValueError covers malformed JSON; OSError covers missing/unreadable files.
    except (ValueError, OSError):
        return default
Return the cached value for the given key.
If no value was yet cached or the value cannot be read, the specified default is returned.
Parameters
- key: Must be a ``/`` separated value. Usually the first name is the name of
  your plugin or your application.
- default: The value to return in case of a cache-miss or invalid cache value.
def set(self, key: str, value: object) -> None:
    """Save value for the given key.

    :param key:
        Must be a ``/`` separated value. Usually the first
        name is the name of your plugin or your application.
    :param value:
        Must be of any combination of basic python types,
        including nested types like lists of dictionaries.
    """
    path = self._getvaluepath(key)
    try:
        self._mkdir(path.parent)
    except OSError as exc:
        # Best-effort semantics: failure to write the cache warns, never fails.
        self.warn(
            f"could not create cache path {path}: {exc}",
            _ispytest=True,
        )
        return
    data = json.dumps(value, ensure_ascii=False, indent=2)
    try:
        f = path.open("w", encoding="UTF-8")
    except OSError as exc:
        self.warn(
            f"cache could not write path {path}: {exc}",
            _ispytest=True,
        )
    else:
        with f:
            f.write(data)
Save value for the given key.
Parameters
- key: Must be a ``/`` separated value. Usually the first name is the name of
  your plugin or your application.
- value: Must be of any combination of basic python types, including nested
  types like lists of dictionaries.
@final
@dataclasses.dataclass
class CallInfo(Generic[TResult]):
    """Result/Exception info of a function invocation."""

    # The return value, only meaningful when ``excinfo`` is None; read via
    # the ``result`` property.
    _result: TResult | None
    #: The captured exception of the call, if it raised.
    excinfo: ExceptionInfo[BaseException] | None
    #: The system time when the call started, in seconds since the epoch.
    start: float
    #: The system time when the call ended, in seconds since the epoch.
    stop: float
    #: The call duration, in seconds.
    duration: float
    #: The context of invocation: "collect", "setup", "call" or "teardown".
    when: Literal["collect", "setup", "call", "teardown"]

    def __init__(
        self,
        result: TResult | None,
        excinfo: ExceptionInfo[BaseException] | None,
        start: float,
        stop: float,
        duration: float,
        when: Literal["collect", "setup", "call", "teardown"],
        *,
        _ispytest: bool = False,
    ) -> None:
        # Private constructor — use :meth:`from_call` to build instances.
        check_ispytest(_ispytest)
        self._result = result
        self.excinfo = excinfo
        self.start = start
        self.stop = stop
        self.duration = duration
        self.when = when

    @property
    def result(self) -> TResult:
        """The return value of the call, if it didn't raise.

        Can only be accessed if excinfo is None.

        :raises AttributeError: If the call raised.
        """
        if self.excinfo is not None:
            raise AttributeError(f"{self!r} has no valid result")
        # The cast is safe because an exception wasn't raised, hence
        # _result has the expected function return type (which may be
        # None, that's why a cast and not an assert).
        return cast(TResult, self._result)

    @classmethod
    def from_call(
        cls,
        func: Callable[[], TResult],
        when: Literal["collect", "setup", "call", "teardown"],
        reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
    ) -> CallInfo[TResult]:
        """Call func, wrapping the result in a CallInfo.

        :param func:
            The function to call. Called without arguments.
        :type func: Callable[[], _pytest.runner.TResult]
        :param when:
            The phase in which the function is called.
        :param reraise:
            Exception or exceptions that shall propagate if raised by the
            function, instead of being wrapped in the CallInfo.
        """
        excinfo = None
        instant = timing.Instant()
        try:
            result: TResult | None = func()
        except BaseException:
            excinfo = ExceptionInfo.from_current()
            # Selected exception types escape instead of being captured.
            if reraise is not None and isinstance(excinfo.value, reraise):
                raise
            result = None
        # Timing covers both the success and the exception path.
        duration = instant.elapsed()
        return cls(
            start=duration.start.time,
            stop=duration.stop.time,
            duration=duration.seconds,
            when=when,
            result=result,
            excinfo=excinfo,
            _ispytest=True,
        )

    def __repr__(self) -> str:
        if self.excinfo is None:
            return f"<CallInfo when={self.when!r} result: {self._result!r}>"
        return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
Result/Exception info of a function invocation.
def __init__(
    self,
    result: TResult | None,
    excinfo: ExceptionInfo[BaseException] | None,
    start: float,
    stop: float,
    duration: float,
    when: Literal["collect", "setup", "call", "teardown"],
    *,
    _ispytest: bool = False,
) -> None:
    # Private constructor — enforced by check_ispytest; use from_call().
    check_ispytest(_ispytest)
    self._result = result
    self.excinfo = excinfo
    self.start = start
    self.stop = stop
    self.duration = duration
    self.when = when
@property
def result(self) -> TResult:
    """The return value of the call, if it didn't raise.

    Can only be accessed if excinfo is None.

    :raises AttributeError: If the call raised.
    """
    if self.excinfo is not None:
        raise AttributeError(f"{self!r} has no valid result")
    # The cast is safe because an exception wasn't raised, hence
    # _result has the expected function return type (which may be
    # None, that's why a cast and not an assert).
    return cast(TResult, self._result)
The return value of the call, if it didn't raise.
Can only be accessed if excinfo is None.
@classmethod
def from_call(
    cls,
    func: Callable[[], TResult],
    when: Literal["collect", "setup", "call", "teardown"],
    reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
) -> CallInfo[TResult]:
    """Call func, wrapping the result in a CallInfo.

    :param func:
        The function to call. Called without arguments.
    :type func: Callable[[], _pytest.runner.TResult]
    :param when:
        The phase in which the function is called.
    :param reraise:
        Exception or exceptions that shall propagate if raised by the
        function, instead of being wrapped in the CallInfo.
    """
    excinfo = None
    instant = timing.Instant()
    try:
        result: TResult | None = func()
    except BaseException:
        excinfo = ExceptionInfo.from_current()
        # Selected exception types escape instead of being captured.
        if reraise is not None and isinstance(excinfo.value, reraise):
            raise
        result = None
    duration = instant.elapsed()
    return cls(
        start=duration.start.time,
        stop=duration.stop.time,
        duration=duration.seconds,
        when=when,
        result=result,
        excinfo=excinfo,
        _ispytest=True,
    )
Call func, wrapping the result in a CallInfo.
Parameters
- func: The function to call. Called without arguments.
- when: The phase in which the function is called.
- reraise: Exception or exceptions that shall propagate if raised by the function, instead of being wrapped in the CallInfo.
class CaptureFixture(Generic[AnyStr]):
    """Object returned by the :fixture:`capsys`, :fixture:`capsysbinary`,
    :fixture:`capfd` and :fixture:`capfdbinary` fixtures."""

    def __init__(
        self,
        captureclass: type[CaptureBase[AnyStr]],
        request: SubRequest,
        *,
        config: dict[str, Any] | None = None,
        _ispytest: bool = False,
    ) -> None:
        check_ispytest(_ispytest)
        self.captureclass: type[CaptureBase[AnyStr]] = captureclass
        self.request = request
        # Keyword arguments forwarded to captureclass when capturing starts.
        self._config = config if config else {}
        self._capture: MultiCapture[AnyStr] | None = None
        # Output accumulated across close() calls, returned by readouterr().
        self._captured_out: AnyStr = self.captureclass.EMPTY_BUFFER
        self._captured_err: AnyStr = self.captureclass.EMPTY_BUFFER

    def _start(self) -> None:
        # Start captures for fd 1 (stdout) and fd 2 (stderr); stdin untouched.
        if self._capture is None:
            self._capture = MultiCapture(
                in_=None,
                out=self.captureclass(1, **self._config),
                err=self.captureclass(2, **self._config),
            )
            self._capture.start_capturing()

    def close(self) -> None:
        # Flush any pending output into the accumulators before stopping.
        if self._capture is not None:
            out, err = self._capture.pop_outerr_to_orig()
            self._captured_out += out
            self._captured_err += err
            self._capture.stop_capturing()
            self._capture = None

    def readouterr(self) -> CaptureResult[AnyStr]:
        """Read and return the captured output so far, resetting the internal
        buffer.

        :returns:
            The captured content as a namedtuple with ``out`` and ``err``
            string attributes.
        """
        captured_out, captured_err = self._captured_out, self._captured_err
        if self._capture is not None:
            out, err = self._capture.readouterr()
            captured_out += out
            captured_err += err
        # Reset the accumulators so each call returns only new output.
        self._captured_out = self.captureclass.EMPTY_BUFFER
        self._captured_err = self.captureclass.EMPTY_BUFFER
        return CaptureResult(captured_out, captured_err)

    def _suspend(self) -> None:
        """Suspend this fixture's own capturing temporarily."""
        if self._capture is not None:
            self._capture.suspend_capturing()

    def _resume(self) -> None:
        """Resume this fixture's own capturing temporarily."""
        if self._capture is not None:
            self._capture.resume_capturing()

    def _is_started(self) -> bool:
        """Whether actively capturing -- not disabled or closed."""
        if self._capture is not None:
            return self._capture.is_started()
        return False

    @contextlib.contextmanager
    def disabled(self) -> Generator[None]:
        """Temporarily disable capturing while inside the ``with`` block."""
        capmanager: CaptureManager = self.request.config.pluginmanager.getplugin(
            "capturemanager"
        )
        # Disables both the global capture manager and this fixture's capture.
        with capmanager.global_and_fixture_disabled():
            yield
Object returned by the ``capsys``, ``capsysbinary``, ``capfd`` and
``capfdbinary`` fixtures.
def __init__(
    self,
    captureclass: type[CaptureBase[AnyStr]],
    request: SubRequest,
    *,
    config: dict[str, Any] | None = None,
    _ispytest: bool = False,
) -> None:
    # Private constructor — enforced by check_ispytest.
    check_ispytest(_ispytest)
    self.captureclass: type[CaptureBase[AnyStr]] = captureclass
    self.request = request
    # Keyword arguments forwarded to captureclass when capturing starts.
    self._config = config if config else {}
    self._capture: MultiCapture[AnyStr] | None = None
    # Output accumulated across close() calls, returned by readouterr().
    self._captured_out: AnyStr = self.captureclass.EMPTY_BUFFER
    self._captured_err: AnyStr = self.captureclass.EMPTY_BUFFER
def readouterr(self) -> CaptureResult[AnyStr]:
    """Read and return the captured output so far, resetting the internal
    buffer.

    :returns:
        The captured content as a namedtuple with ``out`` and ``err``
        string attributes.
    """
    captured_out, captured_err = self._captured_out, self._captured_err
    if self._capture is not None:
        out, err = self._capture.readouterr()
        captured_out += out
        captured_err += err
    # Reset the accumulators so each call returns only new output.
    self._captured_out = self.captureclass.EMPTY_BUFFER
    self._captured_err = self.captureclass.EMPTY_BUFFER
    return CaptureResult(captured_out, captured_err)
Read and return the captured output so far, resetting the internal buffer.
:returns:
The captured content as a namedtuple with ``out`` and ``err`` string
attributes.
@contextlib.contextmanager
def disabled(self) -> Generator[None]:
    """Temporarily disable capturing while inside the ``with`` block."""
    capmanager: CaptureManager = self.request.config.pluginmanager.getplugin(
        "capturemanager"
    )
    # Disables both the global capture manager and this fixture's capture.
    with capmanager.global_and_fixture_disabled():
        yield
Temporarily disable capturing while inside the ``with`` block.
class Class(PyCollector):
    """Collector for test methods (and nested classes) in a Python class."""

    @classmethod
    def from_parent(cls, parent, *, name, obj=None, **kw) -> Self:  # type: ignore[override]
        """The public constructor."""
        # ``obj`` is accepted for interface compatibility but not forwarded.
        return super().from_parent(name=name, parent=parent, **kw)

    def newinstance(self):
        # Create a fresh instance of the collected class.
        return self.obj()

    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
        """Collect the test methods of this class.

        Classes opting out via ``__test__ = False`` or defining ``__init__``
        / ``__new__`` yield nothing (the latter two with a warning).
        """
        if not safe_getattr(self.obj, "__test__", True):
            return []
        if hasinit(self.obj):
            assert self.parent is not None
            self.warn(
                PytestCollectionWarning(
                    f"cannot collect test class {self.obj.__name__!r} because it has a "
                    f"__init__ constructor (from: {self.parent.nodeid})"
                )
            )
            return []
        elif hasnew(self.obj):
            assert self.parent is not None
            self.warn(
                PytestCollectionWarning(
                    f"cannot collect test class {self.obj.__name__!r} because it has a "
                    f"__new__ constructor (from: {self.parent.nodeid})"
                )
            )
            return []

        self._register_setup_class_fixture()
        self._register_setup_method_fixture()

        # Parse fixtures defined on the class itself (via an instance).
        self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)

        return super().collect()

    def _register_setup_class_fixture(self) -> None:
        """Register an autouse, class scoped fixture into the collected class object
        that invokes setup_class/teardown_class if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
        teardown_class = _get_first_non_fixture_func(self.obj, ("teardown_class",))
        if setup_class is None and teardown_class is None:
            return

        def xunit_setup_class_fixture(request) -> Generator[None]:
            cls = request.cls
            if setup_class is not None:
                func = getimfunc(setup_class)
                _call_with_optional_argument(func, cls)
            yield
            if teardown_class is not None:
                func = getimfunc(teardown_class)
                _call_with_optional_argument(func, cls)

        self.session._fixturemanager._register_fixture(
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}",
            func=xunit_setup_class_fixture,
            nodeid=self.nodeid,
            scope="class",
            autouse=True,
        )

    def _register_setup_method_fixture(self) -> None:
        """Register an autouse, function scoped fixture into the collected class object
        that invokes setup_method/teardown_method if either or both are available.

        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_name = "setup_method"
        setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
        teardown_name = "teardown_method"
        teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,))
        if setup_method is None and teardown_method is None:
            return

        def xunit_setup_method_fixture(request) -> Generator[None]:
            instance = request.instance
            method = request.function
            if setup_method is not None:
                func = getattr(instance, setup_name)
                _call_with_optional_argument(func, method)
            yield
            if teardown_method is not None:
                func = getattr(instance, teardown_name)
                _call_with_optional_argument(func, method)

        self.session._fixturemanager._register_fixture(
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}",
            func=xunit_setup_method_fixture,
            nodeid=self.nodeid,
            scope="function",
            autouse=True,
        )
Collector for test methods (and nested classes) in a Python class.
@classmethod
def from_parent(cls, parent, *, name, obj=None, **kw) -> Self:  # type: ignore[override]
    """The public constructor."""
    # ``obj`` is accepted for interface compatibility but not forwarded.
    return super().from_parent(name=name, parent=parent, **kw)
The public constructor.
def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
    """Collect the test methods of this class.

    Classes opting out via ``__test__ = False`` or defining ``__init__`` /
    ``__new__`` yield nothing (the latter two with a warning).
    """
    if not safe_getattr(self.obj, "__test__", True):
        return []
    if hasinit(self.obj):
        assert self.parent is not None
        self.warn(
            PytestCollectionWarning(
                f"cannot collect test class {self.obj.__name__!r} because it has a "
                f"__init__ constructor (from: {self.parent.nodeid})"
            )
        )
        return []
    elif hasnew(self.obj):
        assert self.parent is not None
        self.warn(
            PytestCollectionWarning(
                f"cannot collect test class {self.obj.__name__!r} because it has a "
                f"__new__ constructor (from: {self.parent.nodeid})"
            )
        )
        return []

    self._register_setup_class_fixture()
    self._register_setup_method_fixture()

    # Parse fixtures defined on the class itself (via an instance).
    self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)

    return super().collect()
Collect children (items and collectors) for this collector.
@final
class CollectReport(BaseReport):
    """Collection report object.

    Reports can contain arbitrary extra attributes.
    """

    # Collection reports always belong to the "collect" phase.
    when = "collect"

    def __init__(
        self,
        nodeid: str,
        outcome: Literal["passed", "failed", "skipped"],
        longrepr: None
        | ExceptionInfo[BaseException]
        | tuple[str, int, str]
        | str
        | TerminalRepr,
        result: list[Item | Collector] | None,
        sections: Iterable[tuple[str, str]] = (),
        **extra,
    ) -> None:
        #: Normalized collection nodeid.
        self.nodeid = nodeid

        #: Test outcome, always one of "passed", "failed", "skipped".
        self.outcome = outcome

        #: None or a failure representation.
        self.longrepr = longrepr

        #: The collected items and collection nodes.
        self.result = result or []

        #: Tuples of str ``(heading, content)`` with extra information
        #: for the test report. Used by pytest to add text captured
        #: from ``stdout``, ``stderr``, and intercepted logging events. May
        #: be used by other plugins to add arbitrary information to reports.
        self.sections = list(sections)

        # Arbitrary extra attributes become instance attributes.
        self.__dict__.update(extra)

    @property
    def location(  # type:ignore[override]
        self,
    ) -> tuple[str, int | None, str] | None:
        # Collection failures have no line number; use the fspath twice.
        return (self.fspath, None, self.fspath)

    def __repr__(self) -> str:
        return f"<CollectReport {self.nodeid!r} lenresult={len(self.result)} outcome={self.outcome!r}>"
Collection report object.
Reports can contain arbitrary extra attributes.
def __init__(
    self,
    nodeid: str,
    outcome: Literal["passed", "failed", "skipped"],
    longrepr: None
    | ExceptionInfo[BaseException]
    | tuple[str, int, str]
    | str
    | TerminalRepr,
    result: list[Item | Collector] | None,
    sections: Iterable[tuple[str, str]] = (),
    **extra,
) -> None:
    """Initialize the collection report.

    :param nodeid: Normalized collection nodeid.
    :param outcome: Test outcome, always one of "passed", "failed", "skipped".
    :param longrepr: None or a failure representation.
    :param result: The collected items and collection nodes.
    :param sections: Tuples of str ``(heading, content)`` with extra
        information for the test report. Used by pytest to add text captured
        from ``stdout``, ``stderr``, and intercepted logging events. May be
        used by other plugins to add arbitrary information to reports.
    :param extra: Arbitrary extra attributes attached to the report.
    """
    self.nodeid = nodeid
    self.outcome = outcome
    self.longrepr = longrepr
    self.result = result or []
    self.sections = list(sections)
    self.__dict__.update(extra)
class Collector(Node, abc.ABC):
    """Base class of all collectors.

    Collectors create children through `collect()` and thus iteratively
    build the collection tree.
    """

    class CollectError(Exception):
        """An error during collection, contains a custom message."""

    @abc.abstractmethod
    def collect(self) -> Iterable[Item | Collector]:
        """Collect children (items and collectors) for this collector."""
        raise NotImplementedError("abstract")

    # TODO: This omits the style= parameter which breaks Liskov Substitution.
    def repr_failure(  # type: ignore[override]
        self, excinfo: ExceptionInfo[BaseException]
    ) -> str | TerminalRepr:
        """Return a representation of a collection failure.

        :param excinfo: Exception information for the failure.
        """
        exc = excinfo.value
        # A CollectError already carries a user-facing message; show just
        # that, unless the user explicitly asked for the full traceback.
        if isinstance(exc, self.CollectError) and not self.config.getoption(
            "fulltrace", False
        ):
            return str(exc.args[0])

        # Respect explicit tbstyle option, but default to "short"
        # (_repr_failure_py uses "long" with "fulltrace" option always).
        tbstyle = self.config.getoption("tbstyle", "auto")
        if tbstyle == "auto":
            tbstyle = "short"
        return self._repr_failure_py(excinfo, style=tbstyle)

    def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
        # Without a filesystem path there is nothing to trim against.
        if not hasattr(self, "path"):
            return excinfo.traceback
        full = excinfo.traceback
        trimmed = full.cut(path=self.path)
        if trimmed == full:
            # Cutting at our own path changed nothing; fall back to
            # excluding pytest's internal frames instead.
            trimmed = trimmed.cut(excludepath=tracebackcutdir)
        return trimmed.filter(excinfo)
Base class of all collectors.
Collectors create children through collect() and thus iteratively build
the collection tree.
@abc.abstractmethod
def collect(self) -> Iterable[Item | Collector]:
    """Collect children (items and collectors) for this collector.

    Concrete collectors override this to produce the next level of the
    collection tree; this base implementation only raises.
    """
    raise NotImplementedError("abstract")
Collect children (items and collectors) for this collector.
def repr_failure(  # type: ignore[override]
    self, excinfo: ExceptionInfo[BaseException]
) -> str | TerminalRepr:
    """Return a representation of a collection failure.

    :param excinfo: Exception information for the failure.
    """
    # A CollectError carries its own user-facing message; return only that
    # message unless the user requested the full traceback.
    if isinstance(excinfo.value, self.CollectError) and not self.config.getoption(
        "fulltrace", False
    ):
        return str(excinfo.value.args[0])

    # Respect an explicit tbstyle option, but map the "auto" default to
    # "short" (_repr_failure_py uses "long" with "fulltrace" option always).
    style = self.config.getoption("tbstyle", "auto")
    return self._repr_failure_py(excinfo, style="short" if style == "auto" else style)
Return a representation of a collection failure.
Parameters
- excinfo: Exception information for the failure.
An error during collection, contains a custom message.
@final
class Config:
    """Access to configuration values, pluginmanager and plugin hooks.

    :param PytestPluginManager pluginmanager:
        A pytest PluginManager.

    :param InvocationParams invocation_params:
        Object containing parameters regarding the :func:`pytest.main`
        invocation.
    """

    @final
    @dataclasses.dataclass(frozen=True)
    class InvocationParams:
        """Holds parameters passed during :func:`pytest.main`.

        The object attributes are read-only.

        .. versionadded:: 5.1

        .. note::

            Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts``
            configuration option are handled by pytest, not being included in the ``args`` attribute.

            Plugins accessing ``InvocationParams`` must be aware of that.
        """

        args: tuple[str, ...]
        """The command-line arguments as passed to :func:`pytest.main`."""
        plugins: Sequence[str | _PluggyPlugin] | None
        """Extra plugins, might be `None`."""
        dir: pathlib.Path
        """The directory from which :func:`pytest.main` was invoked."""

        # Custom __init__ so that ``args`` accepts any iterable; the
        # dataclass is frozen, hence the object.__setattr__ calls.
        def __init__(
            self,
            *,
            args: Iterable[str],
            plugins: Sequence[str | _PluggyPlugin] | None,
            dir: pathlib.Path,
        ) -> None:
            object.__setattr__(self, "args", tuple(args))
            object.__setattr__(self, "plugins", plugins)
            object.__setattr__(self, "dir", dir)

    class ArgsSource(enum.Enum):
        """Indicates the source of the test arguments.

        .. versionadded:: 7.2
        """

        #: Command line arguments.
        ARGS = enum.auto()
        #: Invocation directory.
        INVOCATION_DIR = enum.auto()
        # Misspelled member name kept as an alias of INVOCATION_DIR.
        INCOVATION_DIR = INVOCATION_DIR  # backwards compatibility alias
        #: 'testpaths' configuration value.
        TESTPATHS = enum.auto()

    # Set by cacheprovider plugin.
cache: Cache

def __init__(
    self,
    pluginmanager: PytestPluginManager,
    *,
    invocation_params: InvocationParams | None = None,
) -> None:
    """Create a Config bound to *pluginmanager*.

    :param pluginmanager: The plugin manager to use.
    :param invocation_params: Parameters of this invocation; defaults to
        an empty-args invocation from the current working directory.
    """
    if invocation_params is None:
        invocation_params = self.InvocationParams(
            args=(), plugins=None, dir=pathlib.Path.cwd()
        )

    self.option = argparse.Namespace()
    """Access to command line option as attributes.

    :type: argparse.Namespace
    """

    self.invocation_params = invocation_params
    """The parameters with which pytest was invoked.

    :type: InvocationParams
    """

    self._parser = Parser(
        usage=f"%(prog)s [options] [{FILE_OR_DIR}] [{FILE_OR_DIR}] [...]",
        processopt=self._processopt,
        _ispytest=True,
    )
    self.pluginmanager = pluginmanager
    """The plugin manager handles plugin registration and hook invocation.

    :type: PytestPluginManager
    """

    self.stash = Stash()
    """A place where plugins can store information on the config for their
    own use.

    :type: Stash
    """
    # Deprecated alias. Was never public. Can be removed in a few releases.
    self._store = self.stash

    self.trace = self.pluginmanager.trace.root.get("config")
    self.hook: pluggy.HookRelay = PathAwareHookProxy(self.pluginmanager.hook)  # type: ignore[assignment]
    self._inicache: dict[str, Any] = {}
    self._opt2dest: dict[str, str] = {}
    self._cleanup_stack = contextlib.ExitStack()
    self.pluginmanager.register(self, "pytestconfig")
    self._configured = False
    # Historic call so late-registered plugins still get pytest_addoption.
    self.hook.pytest_addoption.call_historic(
        kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager)
    )
    self.args_source = Config.ArgsSource.ARGS
    self.args: list[str] = []

@property
def inicfg(self) -> _DeprecatedInicfgProxy:
    # Returns a proxy (see _DeprecatedInicfgProxy) rather than the raw
    # ini mapping; presumably it warns on deprecated access patterns.
    return _DeprecatedInicfgProxy(self)

@property
def rootpath(self) -> pathlib.Path:
    """The path to the :ref:`rootdir <rootdir>`.

    .. versionadded:: 6.1
    """
    return self._rootpath

@property
def inipath(self) -> pathlib.Path | None:
    """The path to the :ref:`configfile <configfiles>`.

    .. versionadded:: 6.1
    """
    return self._inipath

def add_cleanup(self, func: Callable[[], None]) -> None:
    """Add a function to be called when the config object gets out of
    use (usually coinciding with pytest_unconfigure).
    """
    self._cleanup_stack.callback(func)

def _do_configure(self) -> None:
    """Fire pytest_configure exactly once (historic call)."""
    assert not self._configured
    self._configured = True
    self.hook.pytest_configure.call_historic(kwargs=dict(config=self))

def _ensure_unconfigure(self) -> None:
    """Fire pytest_unconfigure (if configured) and run all cleanups.

    Nested try/finally guarantees that cleanups run and the cleanup
    stack is reset even when unconfigure hooks raise.
    """
    try:
        if self._configured:
            self._configured = False
            try:
                self.hook.pytest_unconfigure(config=self)
            finally:
                # Reset the historic call so a re-configure starts fresh.
                self.hook.pytest_configure._call_history = []
    finally:
        try:
            self._cleanup_stack.close()
        finally:
            self._cleanup_stack = contextlib.ExitStack()

def get_terminal_writer(self) -> TerminalWriter:
    """Return the terminal writer of the terminalreporter plugin."""
    terminalreporter: TerminalReporter | None = self.pluginmanager.get_plugin(
        "terminalreporter"
    )
    assert terminalreporter is not None
    return terminalreporter._tw

def pytest_cmdline_parse(
    self, pluginmanager: PytestPluginManager, args: list[str]
) -> Config:
    try:
        self.parse(args)
    except UsageError:
        # Handle `--version --version` and `--help` here in a minimal fashion.
        # This gets done via helpconfig normally, but its
        # pytest_cmdline_main is not called in case of errors.
        if getattr(self.option, "version", False) or "--version" in args:
            from _pytest.helpconfig import show_version_verbose

            # Note that `--version` (single argument) is handled early by `Config.main()`, so the only
            # way we are reaching this point is via `--version --version`.
            show_version_verbose(self)
        elif (
            getattr(self.option, "help", False) or "--help" in args or "-h" in args
        ):
            self._parser.optparser.print_help()
            sys.stdout.write(
                "\nNOTE: displaying only minimal help due to UsageError.\n\n"
            )

        raise

    return self

def notify_exception(
    self,
    excinfo: ExceptionInfo[BaseException],
    option: argparse.Namespace | None = None,
) -> None:
    """Report an internal error to plugins, or to stderr if unhandled.

    :param excinfo: The internal exception.
    :param option: Parsed options, used for fulltrace/showlocals styling.
    """
    if option and getattr(option, "fulltrace", False):
        style: TracebackStyle = "long"
    else:
        style = "native"
    excrepr = excinfo.getrepr(
        funcargs=True, showlocals=getattr(option, "showlocals", False), style=style
    )
    res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
    # Only print ourselves if no pytest_internalerror implementation
    # claimed the error (all returned falsy).
    if not any(res):
        for line in str(excrepr).split("\n"):
            sys.stderr.write(f"INTERNALERROR> {line}\n")
        sys.stderr.flush()

def cwd_relative_nodeid(self, nodeid: str) -> str:
    # nodeid's are relative to the rootpath, compute relative to cwd.
    if self.invocation_params.dir != self.rootpath:
        base_path_part, *nodeid_part = nodeid.split("::")
        # Only process path part
        fullpath = self.rootpath / base_path_part
        relative_path = bestrelpath(self.invocation_params.dir, fullpath)

        nodeid = "::".join([relative_path, *nodeid_part])
    return nodeid

@classmethod
def fromdictargs(cls, option_dict: Mapping[str, Any], args: list[str]) -> Config:
    """Constructor usable for subprocesses."""
    config = get_config(args)
    config.option.__dict__.update(option_dict)
    config.parse(args, addopts=False)
    for x in config.option.plugins:
        config.pluginmanager.consider_pluginarg(x)
    return config

def _processopt(self, opt: Argument) -> None:
    """Record an option's dest for all its spellings and seed its default."""
    for name in opt._short_opts + opt._long_opts:
        self._opt2dest[name] = opt.dest

    if hasattr(opt, "default"):
        # Do not clobber a value that was already set (e.g. by fromdictargs).
        if not hasattr(self.option, opt.dest):
            setattr(self.option, opt.dest, opt.default)

@hookimpl(trylast=True)
def pytest_load_initial_conftests(self, early_config: Config) -> None:
    # We haven't fully parsed the command line arguments yet, so
    # early_config.args is not set yet. But we need it for
    # discovering the initial conftests. So "pre-run" the logic here.
    # It will be done for real in `parse()`.
    args, _args_source = early_config._decide_args(
        args=early_config.known_args_namespace.file_or_dir,
        pyargs=early_config.known_args_namespace.pyargs,
        testpaths=early_config.getini("testpaths"),
        invocation_dir=early_config.invocation_params.dir,
        rootpath=early_config.rootpath,
        warn=False,
    )
    self.pluginmanager._set_initial_conftests(
        args=args,
        pyargs=early_config.known_args_namespace.pyargs,
        noconftest=early_config.known_args_namespace.noconftest,
        rootpath=early_config.rootpath,
        confcutdir=early_config.known_args_namespace.confcutdir,
        invocation_dir=early_config.invocation_params.dir,
        importmode=early_config.known_args_namespace.importmode,
        consider_namespace_packages=early_config.getini(
            "consider_namespace_packages"
        ),
    )

def _consider_importhook(self) -> None:
    """Install the PEP 302 import hook if using assertion rewriting.

    Needs to parse the --assert=<mode> option from the commandline
    and find all the installed plugins to mark them for rewriting
    by the importhook.
    """
    mode = getattr(self.known_args_namespace, "assertmode", "plain")

    disable_autoload = getattr(
        self.known_args_namespace, "disable_plugin_autoload", False
    ) or bool(os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"))
    if mode == "rewrite":
        import _pytest.assertion

        try:
            hook = _pytest.assertion.install_importhook(self)
        except SystemError:
            # Installing the hook can fail; degrade to plain asserts.
            mode = "plain"
        else:
            self._mark_plugins_for_rewrite(hook, disable_autoload)
    self._warn_about_missing_assertion(mode)

def _mark_plugins_for_rewrite(
    self, hook: AssertionRewritingHook, disable_autoload: bool
) -> None:
    """Given an importhook, mark for rewrite any top-level
    modules or packages in the distribution package for
    all pytest plugins."""
    self.pluginmanager.rewrite_hook = hook

    if disable_autoload:
        # We don't autoload from distribution package entry points,
        # no need to continue.
        return

    package_files = (
        str(file)
        for dist in importlib.metadata.distributions()
        if any(ep.group == "pytest11" for ep in dist.entry_points)
        for file in dist.files or []
    )

    for name in _iter_rewritable_modules(package_files):
        hook.mark_rewrite(name)

def _configure_python_path(self) -> None:
    # `pythonpath = a b` will set `sys.path` to `[a, b, x, y, z, ...]`
    for path in reversed(self.getini("pythonpath")):
        sys.path.insert(0, str(path))
    self.add_cleanup(self._unconfigure_python_path)

def _unconfigure_python_path(self) -> None:
    # Undo _configure_python_path; tolerate entries removed by others.
    for path in self.getini("pythonpath"):
        path_str = str(path)
        if path_str in sys.path:
            sys.path.remove(path_str)

def _validate_args(self, args: list[str], via: str) -> list[str]:
    """Validate known args, annotating errors with their config source."""
    self._parser.extra_info["config source"] = via
    try:
        self._parser.parse_known_and_unknown_args(
            args, namespace=copy.copy(self.option)
        )
    finally:
        self._parser.extra_info.pop("config source", None)

    return args

def _decide_args(
    self,
    *,
    args: list[str],
    pyargs: bool,
    testpaths: list[str],
    invocation_dir: pathlib.Path,
    rootpath: pathlib.Path,
    warn: bool,
) -> tuple[list[str], ArgsSource]:
    """Decide the args (initial paths/nodeids) to use given the relevant inputs.

    :param warn: Whether we can issue warnings.

    :returns: The args and the args source. Guaranteed to be non-empty.
    """
    if args:
        source = Config.ArgsSource.ARGS
        result = args
    else:
        if invocation_dir == rootpath:
            source = Config.ArgsSource.TESTPATHS
            if pyargs:
                result = testpaths
            else:
                result = []
                for path in testpaths:
                    result.extend(sorted(glob.iglob(path, recursive=True)))
                if testpaths and not result:
                    if warn:
                        warning_text = (
                            "No files were found in testpaths; "
                            "consider removing or adjusting your testpaths configuration. "
                            "Searching recursively from the current directory instead."
                        )
                        self.issue_config_time_warning(
                            PytestConfigWarning(warning_text), stacklevel=3
                        )
        else:
            result = []
        if not result:
            source = Config.ArgsSource.INVOCATION_DIR
            result = [str(invocation_dir)]
    return result, source

@hookimpl(wrapper=True)
def pytest_collection(self) -> Generator[None, object, object]:
    # Validate invalid configuration keys after collection is done so we
    # take in account options added by late-loading conftest files.
    try:
        return (yield)
    finally:
        self._validate_config_options()

def _checkversion(self) -> None:
    """Enforce the 'minversion' ini option against the running pytest."""
    import pytest

    minver_ini_value = self._inicfg.get("minversion", None)
    minver = minver_ini_value.value if minver_ini_value is not None else None
    if minver:
        # Imported lazily to improve start-up time.
        from packaging.version import Version

        if not isinstance(minver, str):
            raise pytest.UsageError(
                f"{self.inipath}: 'minversion' must be a single value"
            )

        if Version(minver) > Version(pytest.__version__):
            # Fixed: the message previously ended with a stray apostrophe
            # after the version number.
            raise pytest.UsageError(
                f"{self.inipath}: 'minversion' requires pytest-{minver}, actual pytest-{pytest.__version__}"
            )

def _validate_config_options(self) -> None:
    """Warn (or fail under strict config) on unknown ini keys."""
    for key in sorted(self._get_unknown_ini_keys()):
        self._warn_or_fail_if_strict(f"Unknown config option: {key}\n")

def _validate_plugins(self) -> None:
    """Check that all 'required_plugins' are installed and satisfy specs."""
    required_plugins = sorted(self.getini("required_plugins"))
    if not required_plugins:
        return

    # Imported lazily to improve start-up time.
    from packaging.requirements import InvalidRequirement
    from packaging.requirements import Requirement
    from packaging.version import Version

    plugin_info = self.pluginmanager.list_plugin_distinfo()
    plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info}

    missing_plugins = []
    for required_plugin in required_plugins:
        try:
            req = Requirement(required_plugin)
        except InvalidRequirement:
            # An unparsable requirement is reported as missing.
            missing_plugins.append(required_plugin)
            continue

        if req.name not in plugin_dist_info:
            missing_plugins.append(required_plugin)
        elif not req.specifier.contains(
            Version(plugin_dist_info[req.name]), prereleases=True
        ):
            missing_plugins.append(required_plugin)

    if missing_plugins:
        raise UsageError(
            "Missing required plugins: {}".format(", ".join(missing_plugins)),
        )

def _warn_or_fail_if_strict(self, message: str) -> None:
    """Raise UsageError under strict config, otherwise warn."""
    strict_config = self.getini("strict_config")
    if strict_config is None:
        # Fall back to the general 'strict' option.
        strict_config = self.getini("strict")
    if strict_config:
        raise UsageError(message)

    self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3)
    def _get_unknown_ini_keys(self) -> set[str]:
        # Config-file keys that were never registered through parser.addini(),
        # considering both canonical option names and their aliases.
        known_keys = self._parser._inidict.keys() | self._parser._ini_aliases.keys()
        return self._inicfg.keys() - known_keys

    def parse(self, args: list[str], addopts: bool = True) -> None:
        """Parse the given command line arguments into this config object.

        May only be called once per Config object.

        :param args: Command line arguments; mutated in place to prepend
            ``PYTEST_ADDOPTS`` and the ``addopts`` ini value when ``addopts``
            is true.
        :param addopts: Whether to honor PYTEST_ADDOPTS / the ``addopts``
            ini option.
        """
        assert self.args == [], (
            "can only parse cmdline args at most once per Config object"
        )

        self.hook.pytest_addhooks.call_historic(
            kwargs=dict(pluginmanager=self.pluginmanager)
        )

        if addopts:
            env_addopts = os.environ.get("PYTEST_ADDOPTS", "")
            if len(env_addopts):
                # Environment-provided options go before the explicit ones.
                args[:] = (
                    self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS")
                    + args
                )

        # First pass: just enough parsing to locate rootdir and the config file.
        ns = self._parser.parse_known_args(args, namespace=copy.copy(self.option))
        rootpath, inipath, inicfg, ignored_config_files = determine_setup(
            inifile=ns.inifilename,
            override_ini=ns.override_ini,
            args=ns.file_or_dir,
            rootdir_cmd_arg=ns.rootdir or None,
            invocation_dir=self.invocation_params.dir,
        )
        self._rootpath = rootpath
        self._inipath = inipath
        self._ignored_config_files = ignored_config_files
        self._inicfg = inicfg
        self._parser.extra_info["rootdir"] = str(self.rootpath)
        self._parser.extra_info["inifile"] = str(self.inipath)

        # Core ini options needed before plugins register theirs.
        self._parser.addini("addopts", "Extra command line options", "args")
        self._parser.addini("minversion", "Minimally required pytest version")
        self._parser.addini(
            "pythonpath", type="paths", help="Add paths to sys.path", default=[]
        )
        self._parser.addini(
            "required_plugins",
            "Plugins that must be present for pytest to run",
            type="args",
            default=[],
        )

        if addopts:
            args[:] = (
                self._validate_args(self.getini("addopts"), "via addopts config") + args
            )

        # Second pass, now including addopts.
        self.known_args_namespace = self._parser.parse_known_args(
            args, namespace=copy.copy(self.option)
        )
        self._checkversion()
        self._consider_importhook()
        self._configure_python_path()
        self.pluginmanager.consider_preparse(args, exclude_only=False)
        if (
            not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
            and not self.known_args_namespace.disable_plugin_autoload
        ):
            # Autoloading from distribution package entry point has
            # not been disabled.
            self.pluginmanager.load_setuptools_entrypoints("pytest11")
        # Otherwise only plugins explicitly specified in PYTEST_PLUGINS
        # are going to be loaded.
        self.pluginmanager.consider_env()

        self._parser.parse_known_args(args, namespace=self.known_args_namespace)

        self._validate_plugins()
        self._warn_about_skipped_plugins()

        if self.known_args_namespace.confcutdir is None:
            # Default confcutdir: directory of the config file, else rootdir.
            if self.inipath is not None:
                confcutdir = str(self.inipath.parent)
            else:
                confcutdir = str(self.rootpath)
            self.known_args_namespace.confcutdir = confcutdir
        try:
            self.hook.pytest_load_initial_conftests(
                early_config=self, args=args, parser=self._parser
            )
        except ConftestImportFailure as e:
            if self.known_args_namespace.help or self.known_args_namespace.version:
                # we don't want to prevent --help/--version to work
                # so just let it pass and print a warning at the end
                self.issue_config_time_warning(
                    PytestConfigWarning(f"could not load initial conftests: {e.path}"),
                    stacklevel=2,
                )
            else:
                raise

        try:
            # Final parse with all plugin-added options known.
            self._parser.parse(args, namespace=self.option)
        except PrintHelp:
            # Help was requested; helpconfig prints it, nothing more to do.
            return

        self.args, self.args_source = self._decide_args(
            args=getattr(self.option, FILE_OR_DIR),
            pyargs=self.option.pyargs,
            testpaths=self.getini("testpaths"),
            invocation_dir=self.invocation_params.dir,
            rootpath=self.rootpath,
            warn=True,
        )

    def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None:
        """Issue and handle a warning during the "configure" stage.

        During ``pytest_configure`` we can't capture warnings using the ``catch_warnings_for_item``
        function because it is not possible to have hook wrappers around ``pytest_configure``.

        This function is mainly intended for plugins that need to issue warnings during
        ``pytest_configure`` (or similar stages).

        :param warning: The warning instance.
        :param stacklevel: stacklevel forwarded to warnings.warn.
        """
        if self.pluginmanager.is_blocked("warnings"):
            # The warnings plugin is disabled; stay silent.
            return

        cmdline_filters = self.known_args_namespace.pythonwarnings or []
        config_filters = self.getini("filterwarnings")

        with warnings.catch_warnings(record=True) as records:
            warnings.simplefilter("always", type(warning))
            apply_warning_filters(config_filters, cmdline_filters)
            warnings.warn(warning, stacklevel=stacklevel)

        if records:
            # Attribute the warning to the caller's frame per stacklevel.
            frame = sys._getframe(stacklevel - 1)
            location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name
            self.hook.pytest_warning_recorded.call_historic(
                kwargs=dict(
                    warning_message=records[0],
                    when="config",
                    nodeid="",
                    location=location,
                )
            )

    def addinivalue_line(self, name: str, line: str) -> None:
        """Add a line to a configuration option. The option must have been
        declared but might not yet be set in which case the line becomes
        the first line in its value."""
        x = self.getini(name)
        assert isinstance(x, list)
        x.append(line)  # modifies the cached list inline

    def getini(self, name: str) -> Any:
        """Return configuration value from a :ref:`configuration file <configfiles>`.

        If a configuration value is not defined in a
        :ref:`configuration file <configfiles>`, then the ``default`` value
        provided while registering the configuration through
        :func:`parser.addini <pytest.Parser.addini>` will be returned.
        Please note that you can even provide ``None`` as a valid
        default value.

        If ``default`` is not provided while registering using
        :func:`parser.addini <pytest.Parser.addini>`, then a default value
        based on the ``type`` parameter passed to
        :func:`parser.addini <pytest.Parser.addini>` will be returned.
        The default values based on ``type`` are:
        ``paths``, ``pathlist``, ``args`` and ``linelist`` : empty list ``[]``
        ``bool`` : ``False``
        ``string`` : empty string ``""``
        ``int`` : ``0``
        ``float`` : ``0.0``

        If neither the ``default`` nor the ``type`` parameter is passed
        while registering the configuration through
        :func:`parser.addini <pytest.Parser.addini>`, then the configuration
        is treated as a string and a default empty string '' is returned.

        If the specified name hasn't been registered through a prior
        :func:`parser.addini <pytest.Parser.addini>` call (usually from a
        plugin), a ValueError is raised.
        """
        # Aliases resolve to their canonical name for lookup and caching.
        canonical_name = self._parser._ini_aliases.get(name, name)
        try:
            return self._inicache[canonical_name]
        except KeyError:
            pass
        self._inicache[canonical_name] = val = self._getini(canonical_name)
        return val

    # Meant for easy monkeypatching by legacypath plugin.
    # Can be inlined back (with no cover removed) once legacypath is gone.
    def _getini_unknown_type(self, name: str, type: str, value: object):
        msg = (
            f"Option {name} has unknown configuration type {type} with value {value!r}"
        )
        raise ValueError(msg)  # pragma: no cover

    def _getini(self, name: str):
        # If this is an alias, resolve to canonical name.
        canonical_name = self._parser._ini_aliases.get(name, name)

        try:
            _description, type, default = self._parser._inidict[canonical_name]
        except KeyError as e:
            raise ValueError(f"unknown configuration value: {name!r}") from e

        # Collect all possible values (canonical name + aliases) from _inicfg.
        # Each candidate is (ConfigValue, is_canonical).
        candidates = []
        if canonical_name in self._inicfg:
            candidates.append((self._inicfg[canonical_name], True))
        for alias, target in self._parser._ini_aliases.items():
            if target == canonical_name and alias in self._inicfg:
                candidates.append((self._inicfg[alias], False))

        if not candidates:
            return default

        # Pick the best candidate based on precedence:
        # 1. CLI override takes precedence over file, then
        # 2. Canonical name takes precedence over alias.
        selected = max(candidates, key=lambda x: (x[0].origin == "override", x[1]))[0]
        value = selected.value
        mode = selected.mode

        if mode == "ini":
            # In ini mode, values are always str | list[str].
            assert isinstance(value, (str, list))
            return self._getini_ini(name, canonical_name, type, value, default)
        elif mode == "toml":
            return self._getini_toml(name, canonical_name, type, value, default)
        else:
            assert_never(mode)

    def _getini_ini(
        self,
        name: str,
        canonical_name: str,
        type: str,
        value: str | list[str],
        default: Any,
    ):
        """Handle config values read in INI mode.

        In INI mode, values are stored as str or list[str] only, and coerced
        from string based on the registered type.
        """
        # Note: some coercions are only required if we are reading from .ini
        # files, because the file format doesn't contain type information, but
        # when reading from toml (in ini mode) we will get either str or list of
        # str values (see load_config_dict_from_file). For example:
        #
        # ini:
        #   a_line_list = "tests acceptance"
        #
        # in this case, we need to split the string to obtain a list of strings.
        #
        # toml (ini mode):
        #   a_line_list = ["tests", "acceptance"]
        #
        # in this case, we already have a list ready to use.
        if type == "paths":
            # Paths are resolved relative to the config file's directory
            # (or the invocation dir when there is no config file).
            dp = (
                self.inipath.parent
                if self.inipath is not None
                else self.invocation_params.dir
            )
            input_values = shlex.split(value) if isinstance(value, str) else value
            return [dp / x for x in input_values]
        elif type == "args":
            return shlex.split(value) if isinstance(value, str) else value
        elif type == "linelist":
            if isinstance(value, str):
                # One entry per non-empty line, stripped of whitespace.
                return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
            else:
                return value
        elif type == "bool":
            return _strtobool(str(value).strip())
        elif type == "string":
            return value
        elif type == "int":
            if not isinstance(value, str):
                raise TypeError(
                    f"Expected an int string for option {name} of type integer, but got: {value!r}"
                ) from None
            return int(value)
        elif type == "float":
            if not isinstance(value, str):
                raise TypeError(
                    f"Expected a float string for option {name} of type float, but got: {value!r}"
                ) from None
            return float(value)
        else:
            return self._getini_unknown_type(name, type, value)

    def _getini_toml(
        self,
        name: str,
        canonical_name: str,
        type: str,
        value: object,
        default: Any,
    ):
        """Handle TOML config values with strict type validation and no coercion.

        In TOML mode, values already have native types from TOML parsing.
        We validate types match expectations exactly, including list items.
        """
        value_type = builtins.type(value).__name__
        if type == "paths":
            # Expect a list of strings.
            if not isinstance(value, list):
                raise TypeError(
                    f"{self.inipath}: config option '{name}' expects a list for type 'paths', "
                    f"got {value_type}: {value!r}"
                )
            for i, item in enumerate(value):
                if not isinstance(item, str):
                    item_type = builtins.type(item).__name__
                    raise TypeError(
                        f"{self.inipath}: config option '{name}' expects a list of strings, "
                        f"but item at index {i} is {item_type}: {item!r}"
                    )
            # Paths are resolved relative to the config file's directory.
            dp = (
                self.inipath.parent
                if self.inipath is not None
                else self.invocation_params.dir
            )
            return [dp / x for x in value]
        elif type in {"args", "linelist"}:
            # Expect a list of strings.
            if not isinstance(value, list):
                raise TypeError(
                    f"{self.inipath}: config option '{name}' expects a list for type '{type}', "
                    f"got {value_type}: {value!r}"
                )
            for i, item in enumerate(value):
                if not isinstance(item, str):
                    item_type = builtins.type(item).__name__
                    raise TypeError(
                        f"{self.inipath}: config option '{name}' expects a list of strings, "
                        f"but item at index {i} is {item_type}: {item!r}"
                    )
            return list(value)
        elif type == "bool":
            # Expect a boolean.
            if not isinstance(value, bool):
                raise TypeError(
                    f"{self.inipath}: config option '{name}' expects a bool, "
                    f"got {value_type}: {value!r}"
                )
            return value
        elif type == "int":
            # Expect an integer (but not bool, which is a subclass of int).
            if not isinstance(value, int) or isinstance(value, bool):
                raise TypeError(
                    f"{self.inipath}: config option '{name}' expects an int, "
                    f"got {value_type}: {value!r}"
                )
            return value
        elif type == "float":
            # Expect a float or integer only.
            if not isinstance(value, (float, int)) or isinstance(value, bool):
                raise TypeError(
                    f"{self.inipath}: config option '{name}' expects a float, "
                    f"got {value_type}: {value!r}"
                )
            return value
        elif type == "string":
            # Expect a string.
            if not isinstance(value, str):
                raise TypeError(
                    f"{self.inipath}: config option '{name}' expects a string, "
                    f"got {value_type}: {value!r}"
                )
            return value
        else:
            return self._getini_unknown_type(name, type, value)

    def _getconftest_pathlist(
        self, name: str, path: pathlib.Path
    ) -> list[pathlib.Path] | None:
        # Look up `name` in the conftest relevant for `path` and interpret
        # its entries as paths anchored at that conftest's directory.
        # Returns None when no conftest defines the name.
        try:
            mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
        except KeyError:
            return None
        assert mod.__file__ is not None
        modpath = pathlib.Path(mod.__file__).parent
        values: list[pathlib.Path] = []
        for relroot in relroots:
            if isinstance(relroot, os.PathLike):
                relroot = pathlib.Path(relroot)
            else:
                # Plain strings use "/" as separator; normalize for this OS.
                relroot = relroot.replace("/", os.sep)
            relroot = absolutepath(modpath / relroot)
            values.append(relroot)
        return values

    def getoption(self, name: str, default: Any = notset, skip: bool = False):
        """Return command line option value.

        :param name: Name of the option. You may also specify
            the literal ``--OPT`` option instead of the "dest" option name.
        :param default: Fallback value if no option of that name is **declared** via :hook:`pytest_addoption`.
            Note this parameter will be ignored when the option is **declared** even if the option's value is ``None``.
        :param skip: If ``True``, raise :func:`pytest.skip` if option is undeclared or has a ``None`` value.
            Note that even if ``True``, if a default was specified it will be returned instead of a skip.
        """
        name = self._opt2dest.get(name, name)
        try:
            val = getattr(self.option, name)
            if val is None and skip:
                # Treat an unset (None) value like an undeclared option so
                # the skip/default logic below applies uniformly.
                raise AttributeError(name)
            return val
        except AttributeError as e:
            if default is not notset:
                return default
            if skip:
                import pytest

                pytest.skip(f"no {name!r} option found")
            raise ValueError(f"no option named {name!r}") from e

    def getvalue(self, name: str, path=None):
        """Deprecated, use getoption() instead."""
        return self.getoption(name)

    def getvalueorskip(self, name: str, path=None):
        """Deprecated, use getoption(skip=True) instead."""
        return self.getoption(name, skip=True)

    #: Verbosity type for failed assertions (see :confval:`verbosity_assertions`).
    VERBOSITY_ASSERTIONS: Final = "assertions"
    #: Verbosity type for test case execution (see :confval:`verbosity_test_cases`).
    VERBOSITY_TEST_CASES: Final = "test_cases"
    #: Verbosity type for failed subtests (see :confval:`verbosity_subtests`).
    VERBOSITY_SUBTESTS: Final = "subtests"

    # Sentinel ini value meaning "defer to the global verbosity level".
    _VERBOSITY_INI_DEFAULT: Final = "auto"

    def get_verbosity(self, verbosity_type: str | None = None) -> int:
        r"""Retrieve the verbosity level for a fine-grained verbosity type.

        :param verbosity_type: Verbosity type to get level for. If a level is
            configured for the given type, that value will be returned. If the
            given type is not a known verbosity type, the global verbosity
            level will be returned. If the given type is None (default), the
            global verbosity level will be returned.

        To configure a level for a fine-grained verbosity type, the
        configuration file should have a setting for the configuration name
        and a numeric value for the verbosity level. A special value of "auto"
        can be used to explicitly use the global verbosity level.

        Example:

        .. tab:: toml

            .. code-block:: toml

                [tool.pytest]
                verbosity_assertions = 2

        .. tab:: ini

            .. code-block:: ini

                [pytest]
                verbosity_assertions = 2

        .. code-block:: console

            pytest -v

        .. code-block:: python

            print(config.get_verbosity())  # 1
            print(config.get_verbosity(Config.VERBOSITY_ASSERTIONS))  # 2
        """
        global_level = self.getoption("verbose", default=0)
        assert isinstance(global_level, int)
        if verbosity_type is None:
            return global_level

        ini_name = Config._verbosity_ini_name(verbosity_type)
        if ini_name not in self._parser._inidict:
            # Unknown fine-grained type: fall back to the global level.
            return global_level

        level = self.getini(ini_name)
        if level == Config._VERBOSITY_INI_DEFAULT:
            # "auto" explicitly defers to the global -v level.
            return global_level

        return int(level)

    @staticmethod
    def _verbosity_ini_name(verbosity_type: str) -> str:
        return f"verbosity_{verbosity_type}"

    @staticmethod
    def _add_verbosity_ini(parser: Parser, verbosity_type: str, help: str) -> None:
        """Add a output verbosity configuration option for the given output type.

        :param parser: Parser for command line arguments and config-file values.
        :param verbosity_type: Fine-grained verbosity category.
        :param help: Description of the output this type controls.

        The value should be retrieved via a call to
        :py:func:`config.get_verbosity(type) <pytest.Config.get_verbosity>`.
        """
        parser.addini(
            Config._verbosity_ini_name(verbosity_type),
            help=help,
            type="string",
            default=Config._VERBOSITY_INI_DEFAULT,
        )

    def _warn_about_missing_assertion(self, mode: str) -> None:
        # Warn when `assert` statements are not executed by the interpreter
        # (e.g. python -O), since tests would then silently pass.
        if not _assertion_supported():
            if mode == "plain":
                warning_text = (
                    "ASSERTIONS ARE NOT EXECUTED"
                    " and FAILING TESTS WILL PASS. Are you"
                    " using python -O?"
                )
            else:
                warning_text = (
                    "assertions not in test modules or"
                    " plugins will be ignored"
                    " because assert statements are not executed "
                    "by the underlying Python interpreter "
                    "(are you using python -O?)\n"
                )
            self.issue_config_time_warning(
                PytestConfigWarning(warning_text),
                stacklevel=3,
            )

    def _warn_about_skipped_plugins(self) -> None:
        # Surface plugins that were skipped at import time as config-time
        # warnings so the user learns why they did not load.
        for module_name, msg in self.pluginmanager.skipped_plugins:
            self.issue_config_time_warning(
                PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"),
                stacklevel=2,
            )
Access to configuration values, pluginmanager and plugin hooks.
Parameters
PytestPluginManager pluginmanager: A pytest PluginManager.
InvocationParams invocation_params: Object containing parameters regarding the
``pytest.main()`` invocation.
    def __init__(
        self,
        pluginmanager: PytestPluginManager,
        *,
        invocation_params: InvocationParams | None = None,
    ) -> None:
        """Initialize the config, register it with the plugin manager under
        the name ``pytestconfig`` and fire the ``pytest_addoption`` hook.

        :param pluginmanager: The plugin manager to attach to.
        :param invocation_params: Parameters of the pytest invocation;
            defaults to an empty invocation rooted at the cwd.
        """
        if invocation_params is None:
            # Default invocation: no args, no plugins, current directory.
            invocation_params = self.InvocationParams(
                args=(), plugins=None, dir=pathlib.Path.cwd()
            )

        self.option = argparse.Namespace()
        """Access to command line option as attributes.

        :type: argparse.Namespace
        """

        self.invocation_params = invocation_params
        """The parameters with which pytest was invoked.

        :type: InvocationParams
        """

        self._parser = Parser(
            usage=f"%(prog)s [options] [{FILE_OR_DIR}] [{FILE_OR_DIR}] [...]",
            processopt=self._processopt,
            _ispytest=True,
        )
        self.pluginmanager = pluginmanager
        """The plugin manager handles plugin registration and hook invocation.

        :type: PytestPluginManager
        """

        self.stash = Stash()
        """A place where plugins can store information on the config for their
        own use.

        :type: Stash
        """
        # Deprecated alias. Was never public. Can be removed in a few releases.
        self._store = self.stash

        self.trace = self.pluginmanager.trace.root.get("config")
        self.hook: pluggy.HookRelay = PathAwareHookProxy(self.pluginmanager.hook)  # type: ignore[assignment]
        self._inicache: dict[str, Any] = {}  # cache for getini() lookups
        self._opt2dest: dict[str, str] = {}  # maps --opt spellings to dest names
        self._cleanup_stack = contextlib.ExitStack()
        self.pluginmanager.register(self, "pytestconfig")
        self._configured = False
        self.hook.pytest_addoption.call_historic(
            kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager)
        )
        self.args_source = Config.ArgsSource.ARGS
        self.args: list[str] = []
    @property
    def rootpath(self) -> pathlib.Path:
        """The path to the :ref:`rootdir <rootdir>`.

        .. versionadded:: 6.1
        """
        # Set during parse() from determine_setup().
        return self._rootpath
The path to the :ref:`rootdir <rootdir>`.
New in version 6.1.
    @property
    def inipath(self) -> pathlib.Path | None:
        """The path to the :ref:`configfile <configfiles>`.

        May be ``None`` when no config file was found.

        .. versionadded:: 6.1
        """
        # Set during parse() from determine_setup().
        return self._inipath
The path to the :ref:`configfile <configfiles>`.
New in version 6.1.
    def add_cleanup(self, func: Callable[[], None]) -> None:
        """Add a function to be called when the config object gets out of
        use (usually coinciding with pytest_unconfigure).
        """
        # Registered on an ExitStack, so callbacks run in LIFO order on close.
        self._cleanup_stack.callback(func)
Add a function to be called when the config object gets out of use (usually coinciding with pytest_unconfigure).
    def pytest_cmdline_parse(
        self, pluginmanager: PytestPluginManager, args: list[str]
    ) -> Config:
        """Parse ``args`` into this config; on UsageError, still honor
        ``--version``/``--help`` minimally before re-raising."""
        try:
            self.parse(args)
        except UsageError:
            # Handle `--version --version` and `--help` here in a minimal fashion.
            # This gets done via helpconfig normally, but its
            # pytest_cmdline_main is not called in case of errors.
            if getattr(self.option, "version", False) or "--version" in args:
                from _pytest.helpconfig import show_version_verbose

                # Note that `--version` (single argument) is handled early by `Config.main()`, so the only
                # way we are reaching this point is via `--version --version`.
                show_version_verbose(self)
            elif (
                getattr(self.option, "help", False) or "--help" in args or "-h" in args
            ):
                self._parser.optparser.print_help()
                sys.stdout.write(
                    "\nNOTE: displaying only minimal help due to UsageError.\n\n"
                )

            raise

        return self
1216 def notify_exception( 1217 self, 1218 excinfo: ExceptionInfo[BaseException], 1219 option: argparse.Namespace | None = None, 1220 ) -> None: 1221 if option and getattr(option, "fulltrace", False): 1222 style: TracebackStyle = "long" 1223 else: 1224 style = "native" 1225 excrepr = excinfo.getrepr( 1226 funcargs=True, showlocals=getattr(option, "showlocals", False), style=style 1227 ) 1228 res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) 1229 if not any(res): 1230 for line in str(excrepr).split("\n"): 1231 sys.stderr.write(f"INTERNALERROR> {line}\n") 1232 sys.stderr.flush()
1234 def cwd_relative_nodeid(self, nodeid: str) -> str: 1235 # nodeid's are relative to the rootpath, compute relative to cwd. 1236 if self.invocation_params.dir != self.rootpath: 1237 base_path_part, *nodeid_part = nodeid.split("::") 1238 # Only process path part 1239 fullpath = self.rootpath / base_path_part 1240 relative_path = bestrelpath(self.invocation_params.dir, fullpath) 1241 1242 nodeid = "::".join([relative_path, *nodeid_part]) 1243 return nodeid
1245 @classmethod 1246 def fromdictargs(cls, option_dict: Mapping[str, Any], args: list[str]) -> Config: 1247 """Constructor usable for subprocesses.""" 1248 config = get_config(args) 1249 config.option.__dict__.update(option_dict) 1250 config.parse(args, addopts=False) 1251 for x in config.option.plugins: 1252 config.pluginmanager.consider_pluginarg(x) 1253 return config
Constructor usable for subprocesses.
    @hookimpl(trylast=True)
    def pytest_load_initial_conftests(self, early_config: Config) -> None:
        # We haven't fully parsed the command line arguments yet, so
        # early_config.args is not set yet. But we need it for
        # discovering the initial conftests. So "pre-run" the logic here.
        # It will be done for real in `parse()`.
        args, _args_source = early_config._decide_args(
            args=early_config.known_args_namespace.file_or_dir,
            pyargs=early_config.known_args_namespace.pyargs,
            testpaths=early_config.getini("testpaths"),
            invocation_dir=early_config.invocation_params.dir,
            rootpath=early_config.rootpath,
            warn=False,
        )
        self.pluginmanager._set_initial_conftests(
            args=args,
            pyargs=early_config.known_args_namespace.pyargs,
            noconftest=early_config.known_args_namespace.noconftest,
            rootpath=early_config.rootpath,
            confcutdir=early_config.known_args_namespace.confcutdir,
            invocation_dir=early_config.invocation_params.dir,
            importmode=early_config.known_args_namespace.importmode,
            consider_namespace_packages=early_config.getini(
                "consider_namespace_packages"
            ),
        )
    @hookimpl(wrapper=True)
    def pytest_collection(self) -> Generator[None, object, object]:
        # Validate invalid configuration keys after collection is done so we
        # take into account options added by late-loading conftest files.
        try:
            return (yield)
        finally:
            self._validate_config_options()
    def parse(self, args: list[str], addopts: bool = True) -> None:
        """Parse the given command line arguments into this config object.

        May only be called once per Config object.

        :param args: Command line arguments; mutated in place to prepend
            ``PYTEST_ADDOPTS`` and the ``addopts`` ini value when ``addopts``
            is true.
        :param addopts: Whether to honor PYTEST_ADDOPTS / the ``addopts``
            ini option.
        """
        assert self.args == [], (
            "can only parse cmdline args at most once per Config object"
        )

        self.hook.pytest_addhooks.call_historic(
            kwargs=dict(pluginmanager=self.pluginmanager)
        )

        if addopts:
            env_addopts = os.environ.get("PYTEST_ADDOPTS", "")
            if len(env_addopts):
                # Environment-provided options go before the explicit ones.
                args[:] = (
                    self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS")
                    + args
                )

        # First pass: just enough parsing to locate rootdir and the config file.
        ns = self._parser.parse_known_args(args, namespace=copy.copy(self.option))
        rootpath, inipath, inicfg, ignored_config_files = determine_setup(
            inifile=ns.inifilename,
            override_ini=ns.override_ini,
            args=ns.file_or_dir,
            rootdir_cmd_arg=ns.rootdir or None,
            invocation_dir=self.invocation_params.dir,
        )
        self._rootpath = rootpath
        self._inipath = inipath
        self._ignored_config_files = ignored_config_files
        self._inicfg = inicfg
        self._parser.extra_info["rootdir"] = str(self.rootpath)
        self._parser.extra_info["inifile"] = str(self.inipath)

        # Core ini options needed before plugins register theirs.
        self._parser.addini("addopts", "Extra command line options", "args")
        self._parser.addini("minversion", "Minimally required pytest version")
        self._parser.addini(
            "pythonpath", type="paths", help="Add paths to sys.path", default=[]
        )
        self._parser.addini(
            "required_plugins",
            "Plugins that must be present for pytest to run",
            type="args",
            default=[],
        )

        if addopts:
            args[:] = (
                self._validate_args(self.getini("addopts"), "via addopts config") + args
            )

        # Second pass, now including addopts.
        self.known_args_namespace = self._parser.parse_known_args(
            args, namespace=copy.copy(self.option)
        )
        self._checkversion()
        self._consider_importhook()
        self._configure_python_path()
        self.pluginmanager.consider_preparse(args, exclude_only=False)
        if (
            not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
            and not self.known_args_namespace.disable_plugin_autoload
        ):
            # Autoloading from distribution package entry point has
            # not been disabled.
            self.pluginmanager.load_setuptools_entrypoints("pytest11")
        # Otherwise only plugins explicitly specified in PYTEST_PLUGINS
        # are going to be loaded.
        self.pluginmanager.consider_env()

        self._parser.parse_known_args(args, namespace=self.known_args_namespace)

        self._validate_plugins()
        self._warn_about_skipped_plugins()

        if self.known_args_namespace.confcutdir is None:
            # Default confcutdir: directory of the config file, else rootdir.
            if self.inipath is not None:
                confcutdir = str(self.inipath.parent)
            else:
                confcutdir = str(self.rootpath)
            self.known_args_namespace.confcutdir = confcutdir
        try:
            self.hook.pytest_load_initial_conftests(
                early_config=self, args=args, parser=self._parser
            )
        except ConftestImportFailure as e:
            if self.known_args_namespace.help or self.known_args_namespace.version:
                # we don't want to prevent --help/--version to work
                # so just let it pass and print a warning at the end
                self.issue_config_time_warning(
                    PytestConfigWarning(f"could not load initial conftests: {e.path}"),
                    stacklevel=2,
                )
            else:
                raise

        try:
            # Final parse with all plugin-added options known.
            self._parser.parse(args, namespace=self.option)
        except PrintHelp:
            # Help was requested; helpconfig prints it, nothing more to do.
            return

        self.args, self.args_source = self._decide_args(
            args=getattr(self.option, FILE_OR_DIR),
            pyargs=self.option.pyargs,
            testpaths=self.getini("testpaths"),
            invocation_dir=self.invocation_params.dir,
            rootpath=self.rootpath,
            warn=True,
        )
    def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None:
        """Issue and handle a warning during the "configure" stage.

        During ``pytest_configure`` we can't capture warnings using the ``catch_warnings_for_item``
        function because it is not possible to have hook wrappers around ``pytest_configure``.

        This function is mainly intended for plugins that need to issue warnings during
        ``pytest_configure`` (or similar stages).

        :param warning: The warning instance.
        :param stacklevel: stacklevel forwarded to warnings.warn.
        """
        if self.pluginmanager.is_blocked("warnings"):
            # The warnings plugin is disabled; stay silent.
            return

        cmdline_filters = self.known_args_namespace.pythonwarnings or []
        config_filters = self.getini("filterwarnings")

        with warnings.catch_warnings(record=True) as records:
            warnings.simplefilter("always", type(warning))
            apply_warning_filters(config_filters, cmdline_filters)
            warnings.warn(warning, stacklevel=stacklevel)

        if records:
            # Attribute the warning to the caller's frame per stacklevel.
            frame = sys._getframe(stacklevel - 1)
            location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name
            self.hook.pytest_warning_recorded.call_historic(
                kwargs=dict(
                    warning_message=records[0],
                    when="config",
                    nodeid="",
                    location=location,
                )
            )
Issue and handle a warning during the "configure" stage.
During pytest_configure we can't capture warnings using the catch_warnings_for_item
function because it is not possible to have hook wrappers around pytest_configure.
This function is mainly intended for plugins that need to issue warnings during
pytest_configure (or similar stages).
Parameters
- warning: The warning instance.
- stacklevel: stacklevel forwarded to warnings.warn.
def addinivalue_line(self, name: str, line: str) -> None:
    """Append ``line`` to the value of the configuration option ``name``.

    The option must already have been declared (as a line-list type); if it
    has not been set yet, ``line`` becomes the first entry of its value.
    """
    values = self.getini(name)
    assert isinstance(values, list)
    # Appending mutates the cached list in place, so the addition is
    # visible to every later getini() call without touching the cache.
    values.append(line)
Add a line to a configuration option. The option must have been declared but might not yet be set in which case the line becomes the first line in its value.
def getini(self, name: str) -> Any:
    """Return a configuration value from an :ref:`configuration file <configfiles>`.

    If a configuration value is not defined in a
    :ref:`configuration file <configfiles>`, then the ``default`` value
    provided while registering the configuration through
    :func:`parser.addini <pytest.Parser.addini>` will be returned.
    Please note that you can even provide ``None`` as a valid
    default value.

    If ``default`` is not provided while registering using
    :func:`parser.addini <pytest.Parser.addini>`, then a default value
    based on the ``type`` parameter passed to
    :func:`parser.addini <pytest.Parser.addini>` will be returned.
    The default values based on ``type`` are:
    ``paths``, ``pathlist``, ``args`` and ``linelist`` : empty list ``[]``
    ``bool`` : ``False``
    ``string`` : empty string ``""``
    ``int`` : ``0``
    ``float`` : ``0.0``

    If neither the ``default`` nor the ``type`` parameter is passed
    while registering the configuration through
    :func:`parser.addini <pytest.Parser.addini>`, then the configuration
    is treated as a string and a default empty string '' is returned.

    If the specified name hasn't been registered through a prior
    :func:`parser.addini <pytest.Parser.addini>` call (usually from a
    plugin), a ValueError is raised.
    """
    # Aliased option names resolve to one canonical name so every spelling
    # shares a single cache entry.
    canonical_name = self._parser._ini_aliases.get(name, name)
    try:
        return self._inicache[canonical_name]
    except KeyError:
        pass
    # Cache miss: compute the value once, memoize it, and return it.
    self._inicache[canonical_name] = val = self._getini(canonical_name)
    return val
Return a configuration value from a :ref:`configuration file <configfiles>`.
If a configuration value is not defined in a
:ref:configuration file <configfiles>, then the default value
provided while registering the configuration through
parser.addini <pytest.Parser.addini>() will be returned.
Please note that you can even provide None as a valid
default value.
If default is not provided while registering using
parser.addini <pytest.Parser.addini>(), then a default value
based on the type parameter passed to
parser.addini <pytest.Parser.addini>() will be returned.
The default values based on type are:
paths, pathlist, args and linelist : empty list []
bool : False
string : empty string ""
int : 0
float : 0.0
If neither the default nor the type parameter is passed
while registering the configuration through
parser.addini <pytest.Parser.addini>(), then the configuration
is treated as a string and a default empty string '' is returned.
If the specified name hasn't been registered through a prior
parser.addini <pytest.Parser.addini>() call (usually from a
plugin), a ValueError is raised.
def getoption(self, name: str, default: Any = notset, skip: bool = False):
    """Return a command line option value.

    :param name: Name of the option. You may also specify
        the literal ``--OPT`` option instead of the "dest" option name.
    :param default: Fallback value if no option of that name is **declared** via :hook:`pytest_addoption`.
        Note this parameter will be ignored when the option is **declared** even if the option's value is ``None``.
    :param skip: If ``True``, raise :func:`pytest.skip` if option is undeclared or has a ``None`` value.
        Note that even if ``True``, if a default was specified it will be returned instead of a skip.
    """
    dest = self._opt2dest.get(name, name)
    try:
        value = getattr(self.option, dest)
        # Treat a None value like a missing option when skipping is
        # requested, so both cases share the handling below.
        if skip and value is None:
            raise AttributeError(dest)
        return value
    except AttributeError as e:
        if default is not notset:
            return default
        if skip:
            import pytest

            pytest.skip(f"no {dest!r} option found")
        raise ValueError(f"no option named {dest!r}") from e
Return command line option value.
Parameters
- name: Name of the option. You may also specify the literal ``--OPT`` option instead of the "dest" option name.
- default: Fallback value if no option of that name is **declared** via ``pytest_addoption``. Note this parameter will be ignored when the option is **declared**, even if the option's value is ``None``.
- skip: If ``True``, raise ``pytest.skip()`` if the option is undeclared or has a ``None`` value. Note that even if ``True``, if a default was specified it will be returned instead of a skip.
def getvalue(self, name: str, path=None):
    """Deprecated alias kept for backward compatibility; use :meth:`getoption` instead."""
    # ``path`` is accepted but ignored, matching the historical signature.
    return self.getoption(name)
Deprecated, use getoption() instead.
def getvalueorskip(self, name: str, path=None):
    """Deprecated alias kept for backward compatibility; use ``getoption(skip=True)`` instead."""
    # ``path`` is accepted but ignored, matching the historical signature.
    return self.getoption(name, skip=True)
Deprecated, use getoption(skip=True) instead.
def get_verbosity(self, verbosity_type: str | None = None) -> int:
    r"""Retrieve the verbosity level for a fine-grained verbosity type.

    :param verbosity_type: Verbosity type to get the level for. If a level
        is configured for the given type, that value is returned. If the
        type is not a known verbosity type, or is ``None`` (the default),
        the global verbosity level is returned.

    To configure a level for a fine-grained verbosity type, the
    configuration file should set the corresponding option to a numeric
    value. The special value "auto" explicitly defers to the global
    verbosity level.

    Example:

    .. code-block:: ini

        [pytest]
        verbosity_assertions = 2

    .. code-block:: console

        pytest -v

    .. code-block:: python

        print(config.get_verbosity())  # 1
        print(config.get_verbosity(Config.VERBOSITY_ASSERTIONS))  # 2
    """
    fallback = self.getoption("verbose", default=0)
    assert isinstance(fallback, int)
    if verbosity_type is None:
        return fallback

    ini_name = Config._verbosity_ini_name(verbosity_type)
    if ini_name not in self._parser._inidict:
        # Unknown fine-grained type: fall back to the global level.
        return fallback

    configured = self.getini(ini_name)
    if configured == Config._VERBOSITY_INI_DEFAULT:
        # "auto": explicitly defer to the global level.
        return fallback
    return int(configured)
Retrieve the verbosity level for a fine-grained verbosity type.
Parameters
- verbosity_type: Verbosity type to get level for. If a level is configured for the given type, that value will be returned. If the given type is not a known verbosity type, the global verbosity level will be returned. If the given type is None (default), the global verbosity level will be returned.
To configure a level for a fine-grained verbosity type, the configuration file should have a setting for the configuration name and a numeric value for the verbosity level. A special value of "auto" can be used to explicitly use the global verbosity level.
Example:
toml (``pyproject.toml``):

    [tool.pytest]
    verbosity_assertions = 2

ini (``pytest.ini``):

    [pytest]
    verbosity_assertions = 2

Console:

    pytest -v

Python:

    print(config.get_verbosity())  # 1
    print(config.get_verbosity(Config.VERBOSITY_ASSERTIONS))  # 2
@final
@dataclasses.dataclass(frozen=True)
class InvocationParams:
    """Holds the parameters passed to :func:`pytest.main`.

    The attributes are read-only.

    .. versionadded:: 5.1

    .. note::

        The ``PYTEST_ADDOPTS`` environment variable and the ``addopts``
        configuration option are handled by pytest itself and are **not**
        reflected in the ``args`` attribute.

        Plugins accessing ``InvocationParams`` must be aware of that.
    """

    args: tuple[str, ...]
    """The command-line arguments as passed to :func:`pytest.main`."""
    plugins: Sequence[str | _PluggyPlugin] | None
    """Extra plugins, might be `None`."""
    dir: pathlib.Path
    """The directory from which :func:`pytest.main` was invoked."""

    def __init__(
        self,
        *,
        args: Iterable[str],
        plugins: Sequence[str | _PluggyPlugin] | None,
        dir: pathlib.Path,
    ) -> None:
        # The dataclass is frozen, so writes must go through
        # object.__setattr__ to bypass the generated immutability guard.
        assign = object.__setattr__
        assign(self, "args", tuple(args))
        assign(self, "plugins", plugins)
        assign(self, "dir", dir)
Holds parameters passed during pytest.main().
The object attributes are read-only.
New in version 5.1.
Note that the environment variable PYTEST_ADDOPTS and the addopts
configuration option are handled by pytest, not being included in the args attribute.
Plugins accessing InvocationParams must be aware of that.
class ArgsSource(enum.Enum):
    """Indicates the source of the test arguments.

    .. versionadded:: 7.2
    """

    #: Command line arguments.
    ARGS = enum.auto()
    #: Invocation directory.
    INVOCATION_DIR = enum.auto()
    # NOTE: misspelled alias kept on purpose -- removing it would break
    # existing callers that reference the old name.
    INCOVATION_DIR = INVOCATION_DIR  # backwards compatibility alias
    #: 'testpaths' configuration value.
    TESTPATHS = enum.auto()
Indicates the source of the test arguments.
New in version 7.2.
@final
class Dir(nodes.Directory):
    """Collector of files in a file system directory.

    .. versionadded:: 8.0

    .. note::

        Python directories containing an `__init__.py` file are instead
        collected by :class:`~pytest.Package` by default. Both are
        :class:`~pytest.Directory` collectors.
    """

    @classmethod
    def from_parent(  # type: ignore[override]
        cls,
        parent: nodes.Collector,
        *,
        path: Path,
    ) -> Self:
        """The public constructor.

        :param parent: The parent collector of this Dir.
        :param path: The directory's path.
        :type path: pathlib.Path
        """
        return super().from_parent(parent=parent, path=path)

    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
        """Yield a collector for each non-ignored entry of this directory."""
        config = self.config
        ihook = self.ihook
        session = self.session
        for entry in scandir(self.path):
            entry_path = Path(entry.path)
            if entry.is_dir():
                # Initial paths (and their parents) are always collected;
                # everything else may be filtered via pytest_ignore_collect.
                if not session.isinitpath(entry_path, with_parents=True):
                    if ihook.pytest_ignore_collect(
                        collection_path=entry_path, config=config
                    ):
                        continue
                dir_col: nodes.Collector | None = ihook.pytest_collect_directory(
                    path=entry_path, parent=self
                )
                if dir_col is not None:
                    yield dir_col
            elif entry.is_file():
                if not session.isinitpath(entry_path):
                    if ihook.pytest_ignore_collect(
                        collection_path=entry_path, config=config
                    ):
                        continue
                file_cols: Sequence[nodes.Collector] = ihook.pytest_collect_file(
                    file_path=entry_path, parent=self
                )
                yield from file_cols
Collector of files in a file system directory.
New in version 8.0.
Python directories with an __init__.py file are instead collected by
~pytest.Package by default. Both are ~pytest.Directory
collectors.
@classmethod
def from_parent(  # type: ignore[override]
    cls,
    parent: nodes.Collector,
    *,
    path: Path,
) -> Self:
    """The public constructor.

    :param parent: The parent collector of this Dir.
    :param path: The directory's path.
    :type path: pathlib.Path
    """
    # Delegates to the base-class constructor; this override only narrows
    # the accepted arguments (hence the type-ignore above).
    return super().from_parent(parent=parent, path=path)
The public constructor.
Parameters
- parent: The parent collector of this Dir.
- path: The directory's path.
def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
    """Scan this directory and yield a collector for each of its entries."""
    config = self.config
    col: nodes.Collector | None
    cols: Sequence[nodes.Collector]
    ihook = self.ihook
    for direntry in scandir(self.path):
        if direntry.is_dir():
            path = Path(direntry.path)
            # Initial paths (and their parents) are always collected;
            # other entries may be filtered out via pytest_ignore_collect.
            if not self.session.isinitpath(path, with_parents=True):
                if ihook.pytest_ignore_collect(collection_path=path, config=config):
                    continue
            col = ihook.pytest_collect_directory(path=path, parent=self)
            if col is not None:
                yield col

        elif direntry.is_file():
            path = Path(direntry.path)
            if not self.session.isinitpath(path):
                if ihook.pytest_ignore_collect(collection_path=path, config=config):
                    continue
            cols = ihook.pytest_collect_file(file_path=path, parent=self)
            yield from cols
Collect children (items and collectors) for this collector.
class Directory(FSCollector, abc.ABC):
    """Base class for collecting files from a directory.

    A basic directory collector does the following: goes over the files and
    sub-directories in the directory and creates collectors for them by calling
    the hooks :hook:`pytest_collect_directory` and :hook:`pytest_collect_file`,
    after checking that they are not ignored using
    :hook:`pytest_ignore_collect`.

    The default directory collectors are :class:`~pytest.Dir` and
    :class:`~pytest.Package`.

    .. versionadded:: 8.0

    .. seealso:: :ref:`custom directory collectors`.
    """
Base class for collecting files from a directory.
A basic directory collector does the following: goes over the files and
sub-directories in the directory and creates collectors for them by calling
the hooks :hook:pytest_collect_directory and :hook:pytest_collect_file,
after checking that they are not ignored using
:hook:pytest_ignore_collect.
The default directory collectors are ~pytest.Dir and
~pytest.Package.
New in version 8.0.
See also: :ref:`custom directory collectors`.
class DoctestItem(Item):
    """A pytest Item wrapping a single :class:`doctest.DocTest` for execution."""

    def __init__(
        self,
        name: str,
        parent: DoctestTextfile | DoctestModule,
        runner: doctest.DocTestRunner,
        dtest: doctest.DocTest,
    ) -> None:
        """Store the doctest runner and DocTest, and wire up fixture support."""
        super().__init__(name, parent)
        self.runner = runner
        self.dtest = dtest

        # Stuff needed for fixture support.
        self.obj = None
        fm = self.session._fixturemanager
        fixtureinfo = fm.getfixtureinfo(node=self, func=None, cls=None)
        self._fixtureinfo = fixtureinfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()

    @classmethod
    def from_parent(  # type: ignore[override]
        cls,
        parent: DoctestTextfile | DoctestModule,
        *,
        name: str,
        runner: doctest.DocTestRunner,
        dtest: doctest.DocTest,
    ) -> Self:
        # incompatible signature due to imposed limits on subclass
        """The public named constructor."""
        return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest)

    def _initrequest(self) -> None:
        """Create the fixture request object used by setup()."""
        self.funcargs: dict[str, object] = {}
        self._request = TopRequest(self, _ispytest=True)  # type: ignore[arg-type]

    def setup(self) -> None:
        """Fill fixtures and expose them (plus ``getfixture``) in the doctest globals."""
        self._request._fillfixtures()
        globs = dict(getfixture=self._request.getfixturevalue)
        for name, value in self._request.getfixturevalue("doctest_namespace").items():
            globs[name] = value
        self.dtest.globs.update(globs)

    def runtest(self) -> None:
        """Run the wrapped doctest; raise MultipleDoctestFailures on any failure."""
        _check_all_skipped(self.dtest)
        self._disable_output_capturing_for_darwin()
        failures: list[doctest.DocTestFailure] = []
        # Type ignored because we change the type of `out` from what
        # doctest expects.
        self.runner.run(self.dtest, out=failures)  # type: ignore[arg-type]
        if failures:
            raise MultipleDoctestFailures(failures)

    def _disable_output_capturing_for_darwin(self) -> None:
        """Disable output capturing. Otherwise, stdout is lost to doctest (#985)."""
        if platform.system() != "Darwin":
            return
        capman = self.config.pluginmanager.getplugin("capturemanager")
        if capman:
            # Flush whatever was captured so far back to the real streams.
            capman.suspend_global_capture(in_=True)
            out, err = capman.read_global_capture()
            sys.stdout.write(out)
            sys.stderr.write(err)

    # TODO: Type ignored -- breaks Liskov Substitution.
    def repr_failure(  # type: ignore[override]
        self,
        excinfo: ExceptionInfo[BaseException],
    ) -> str | TerminalRepr:
        """Build a doctest-specific representation of a test failure."""
        import doctest

        # Normalize the caught exception into a list of doctest failures;
        # anything else is delegated to the base implementation below.
        failures: (
            Sequence[doctest.DocTestFailure | doctest.UnexpectedException] | None
        ) = None
        if isinstance(
            excinfo.value, doctest.DocTestFailure | doctest.UnexpectedException
        ):
            failures = [excinfo.value]
        elif isinstance(excinfo.value, MultipleDoctestFailures):
            failures = excinfo.value.failures

        if failures is None:
            return super().repr_failure(excinfo)

        reprlocation_lines = []
        for failure in failures:
            example = failure.example
            test = failure.test
            filename = test.filename
            if test.lineno is None:
                lineno = None
            else:
                # example.lineno is relative to the docstring; combine with
                # the docstring's own position to get a file line number.
                lineno = test.lineno + example.lineno + 1
            message = type(failure).__name__
            # TODO: ReprFileLocation doesn't expect a None lineno.
            reprlocation = ReprFileLocation(filename, lineno, message)  # type: ignore[arg-type]
            checker = _get_checker()
            report_choice = _get_report_choice(self.config.getoption("doctestreport"))
            if lineno is not None:
                assert failure.test.docstring is not None
                lines = failure.test.docstring.splitlines(False)
                # add line numbers to the left of the error message
                assert test.lineno is not None
                lines = [
                    f"{i + test.lineno + 1:03d} {x}" for (i, x) in enumerate(lines)
                ]
                # trim docstring error lines to 10
                lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
            else:
                lines = [
                    "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
                ]
                indent = ">>>"
                for line in example.source.splitlines():
                    lines.append(f"??? {indent} {line}")
                    indent = "..."
            if isinstance(failure, doctest.DocTestFailure):
                # Expected-vs-got diff as produced by the output checker.
                lines += checker.output_difference(
                    example, failure.got, report_choice
                ).split("\n")
            else:
                inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info)
                lines += [f"UNEXPECTED EXCEPTION: {inner_excinfo.value!r}"]
                lines += [
                    x.strip("\n") for x in traceback.format_exception(*failure.exc_info)
                ]
            reprlocation_lines.append((reprlocation, lines))
        return ReprFailDoctest(reprlocation_lines)

    def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]:
        """Report location: (path, 0-based doctest line number, display name)."""
        return self.path, self.dtest.lineno, f"[doctest] {self.name}"
Base class of all test invocation items.
Note that for a single function there might be multiple test invocation items.
def __init__(
    self,
    name: str,
    parent: DoctestTextfile | DoctestModule,
    runner: doctest.DocTestRunner,
    dtest: doctest.DocTest,
) -> None:
    """Store the doctest runner and DocTest, and wire up fixture support."""
    super().__init__(name, parent)
    self.runner = runner
    self.dtest = dtest

    # Stuff needed for fixture support.
    self.obj = None
    fm = self.session._fixturemanager
    fixtureinfo = fm.getfixtureinfo(node=self, func=None, cls=None)
    self._fixtureinfo = fixtureinfo
    self.fixturenames = fixtureinfo.names_closure
    self._initrequest()
@classmethod
def from_parent(  # type: ignore[override]
    cls,
    parent: DoctestTextfile | DoctestModule,
    *,
    name: str,
    runner: doctest.DocTestRunner,
    dtest: doctest.DocTest,
) -> Self:
    # incompatible signature due to imposed limits on subclass
    """The public named constructor."""
    # Forwards all keyword arguments to the base-class constructor.
    return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest)
The public named constructor.
def runtest(self) -> None:
    """Run the wrapped doctest; raise MultipleDoctestFailures on any failure."""
    _check_all_skipped(self.dtest)
    self._disable_output_capturing_for_darwin()
    failures: list[doctest.DocTestFailure] = []
    # Type ignored because we change the type of `out` from what
    # doctest expects.
    self.runner.run(self.dtest, out=failures)  # type: ignore[arg-type]
    if failures:
        raise MultipleDoctestFailures(failures)
Run the test case for this item.
Must be implemented by subclasses.
See also: :ref:`non-python tests`.
def repr_failure(  # type: ignore[override]
    self,
    excinfo: ExceptionInfo[BaseException],
) -> str | TerminalRepr:
    """Build a doctest-specific representation of a test failure."""
    import doctest

    # Normalize the caught exception into a list of doctest failures;
    # anything else is delegated to the base implementation below.
    failures: (
        Sequence[doctest.DocTestFailure | doctest.UnexpectedException] | None
    ) = None
    if isinstance(
        excinfo.value, doctest.DocTestFailure | doctest.UnexpectedException
    ):
        failures = [excinfo.value]
    elif isinstance(excinfo.value, MultipleDoctestFailures):
        failures = excinfo.value.failures

    if failures is None:
        return super().repr_failure(excinfo)

    reprlocation_lines = []
    for failure in failures:
        example = failure.example
        test = failure.test
        filename = test.filename
        if test.lineno is None:
            lineno = None
        else:
            # example.lineno is relative to the docstring; combine with the
            # docstring's own position to get a file line number.
            lineno = test.lineno + example.lineno + 1
        message = type(failure).__name__
        # TODO: ReprFileLocation doesn't expect a None lineno.
        reprlocation = ReprFileLocation(filename, lineno, message)  # type: ignore[arg-type]
        checker = _get_checker()
        report_choice = _get_report_choice(self.config.getoption("doctestreport"))
        if lineno is not None:
            assert failure.test.docstring is not None
            lines = failure.test.docstring.splitlines(False)
            # add line numbers to the left of the error message
            assert test.lineno is not None
            lines = [
                f"{i + test.lineno + 1:03d} {x}" for (i, x) in enumerate(lines)
            ]
            # trim docstring error lines to 10
            lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
        else:
            lines = [
                "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
            ]
            indent = ">>>"
            for line in example.source.splitlines():
                lines.append(f"??? {indent} {line}")
                indent = "..."
        if isinstance(failure, doctest.DocTestFailure):
            # Expected-vs-got diff as produced by the output checker.
            lines += checker.output_difference(
                example, failure.got, report_choice
            ).split("\n")
        else:
            inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info)
            lines += [f"UNEXPECTED EXCEPTION: {inner_excinfo.value!r}"]
            lines += [
                x.strip("\n") for x in traceback.format_exception(*failure.exc_info)
            ]
        reprlocation_lines.append((reprlocation, lines))
    return ReprFailDoctest(reprlocation_lines)
Return a representation of a collection or test failure.
See also: :ref:`non-python tests`.
Parameters
- excinfo: Exception information for the failure.
def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]:
    """Report location: (path, 0-based doctest line number, display name)."""
    display_name = f"[doctest] {self.name}"
    return self.path, self.dtest.lineno, display_name
Get location information for this item for test reports.
Returns a tuple with three elements:
- The path of the test (default: ``self.path``)
- The 0-based line number of the test (default: ``None``)
- A name of the test to be shown (default: ``""``)
See also: :ref:`non-python tests`.
495@final 496@dataclasses.dataclass 497class ExceptionInfo(Generic[E]): 498 """Wraps sys.exc_info() objects and offers help for navigating the traceback.""" 499 500 _assert_start_repr: ClassVar = "AssertionError('assert " 501 502 _excinfo: tuple[type[E], E, TracebackType] | None 503 _striptext: str 504 _traceback: Traceback | None 505 506 def __init__( 507 self, 508 excinfo: tuple[type[E], E, TracebackType] | None, 509 striptext: str = "", 510 traceback: Traceback | None = None, 511 *, 512 _ispytest: bool = False, 513 ) -> None: 514 check_ispytest(_ispytest) 515 self._excinfo = excinfo 516 self._striptext = striptext 517 self._traceback = traceback 518 519 @classmethod 520 def from_exception( 521 cls, 522 # Ignoring error: "Cannot use a covariant type variable as a parameter". 523 # This is OK to ignore because this class is (conceptually) readonly. 524 # See https://github.com/python/mypy/issues/7049. 525 exception: E, # type: ignore[misc] 526 exprinfo: str | None = None, 527 ) -> ExceptionInfo[E]: 528 """Return an ExceptionInfo for an existing exception. 529 530 The exception must have a non-``None`` ``__traceback__`` attribute, 531 otherwise this function fails with an assertion error. This means that 532 the exception must have been raised, or added a traceback with the 533 :py:meth:`~BaseException.with_traceback()` method. 534 535 :param exprinfo: 536 A text string helping to determine if we should strip 537 ``AssertionError`` from the output. Defaults to the exception 538 message/``__str__()``. 539 540 .. versionadded:: 7.4 541 """ 542 assert exception.__traceback__, ( 543 "Exceptions passed to ExcInfo.from_exception(...)" 544 " must have a non-None __traceback__." 
545 ) 546 exc_info = (type(exception), exception, exception.__traceback__) 547 return cls.from_exc_info(exc_info, exprinfo) 548 549 @classmethod 550 def from_exc_info( 551 cls, 552 exc_info: tuple[type[E], E, TracebackType], 553 exprinfo: str | None = None, 554 ) -> ExceptionInfo[E]: 555 """Like :func:`from_exception`, but using old-style exc_info tuple.""" 556 _striptext = "" 557 if exprinfo is None and isinstance(exc_info[1], AssertionError): 558 exprinfo = getattr(exc_info[1], "msg", None) 559 if exprinfo is None: 560 exprinfo = saferepr(exc_info[1]) 561 if exprinfo and exprinfo.startswith(cls._assert_start_repr): 562 _striptext = "AssertionError: " 563 564 return cls(exc_info, _striptext, _ispytest=True) 565 566 @classmethod 567 def from_current(cls, exprinfo: str | None = None) -> ExceptionInfo[BaseException]: 568 """Return an ExceptionInfo matching the current traceback. 569 570 .. warning:: 571 572 Experimental API 573 574 :param exprinfo: 575 A text string helping to determine if we should strip 576 ``AssertionError`` from the output. Defaults to the exception 577 message/``__str__()``. 
578 """ 579 tup = sys.exc_info() 580 assert tup[0] is not None, "no current exception" 581 assert tup[1] is not None, "no current exception" 582 assert tup[2] is not None, "no current exception" 583 exc_info = (tup[0], tup[1], tup[2]) 584 return ExceptionInfo.from_exc_info(exc_info, exprinfo) 585 586 @classmethod 587 def for_later(cls) -> ExceptionInfo[E]: 588 """Return an unfilled ExceptionInfo.""" 589 return cls(None, _ispytest=True) 590 591 def fill_unfilled(self, exc_info: tuple[type[E], E, TracebackType]) -> None: 592 """Fill an unfilled ExceptionInfo created with ``for_later()``.""" 593 assert self._excinfo is None, "ExceptionInfo was already filled" 594 self._excinfo = exc_info 595 596 @property 597 def type(self) -> type[E]: 598 """The exception class.""" 599 assert self._excinfo is not None, ( 600 ".type can only be used after the context manager exits" 601 ) 602 return self._excinfo[0] 603 604 @property 605 def value(self) -> E: 606 """The exception value.""" 607 assert self._excinfo is not None, ( 608 ".value can only be used after the context manager exits" 609 ) 610 return self._excinfo[1] 611 612 @property 613 def tb(self) -> TracebackType: 614 """The exception raw traceback.""" 615 assert self._excinfo is not None, ( 616 ".tb can only be used after the context manager exits" 617 ) 618 return self._excinfo[2] 619 620 @property 621 def typename(self) -> str: 622 """The type name of the exception.""" 623 assert self._excinfo is not None, ( 624 ".typename can only be used after the context manager exits" 625 ) 626 return self.type.__name__ 627 628 @property 629 def traceback(self) -> Traceback: 630 """The traceback.""" 631 if self._traceback is None: 632 self._traceback = Traceback(self.tb) 633 return self._traceback 634 635 @traceback.setter 636 def traceback(self, value: Traceback) -> None: 637 self._traceback = value 638 639 def __repr__(self) -> str: 640 if self._excinfo is None: 641 return "<ExceptionInfo for raises contextmanager>" 642 return 
f"<{self.__class__.__name__} {saferepr(self._excinfo[1])} tblen={len(self.traceback)}>" 643 644 def exconly(self, tryshort: bool = False) -> str: 645 """Return the exception as a string. 646 647 When 'tryshort' resolves to True, and the exception is an 648 AssertionError, only the actual exception part of the exception 649 representation is returned (so 'AssertionError: ' is removed from 650 the beginning). 651 """ 652 653 def _get_single_subexc( 654 eg: BaseExceptionGroup[BaseException], 655 ) -> BaseException | None: 656 if len(eg.exceptions) != 1: 657 return None 658 if isinstance(e := eg.exceptions[0], BaseExceptionGroup): 659 return _get_single_subexc(e) 660 return e 661 662 if ( 663 tryshort 664 and isinstance(self.value, BaseExceptionGroup) 665 and (subexc := _get_single_subexc(self.value)) is not None 666 ): 667 return f"{subexc!r} [single exception in {type(self.value).__name__}]" 668 669 lines = format_exception_only(self.type, self.value) 670 text = "".join(lines) 671 text = text.rstrip() 672 if tryshort: 673 if text.startswith(self._striptext): 674 text = text[len(self._striptext) :] 675 return text 676 677 def errisinstance(self, exc: EXCEPTION_OR_MORE) -> bool: 678 """Return True if the exception is an instance of exc. 679 680 Consider using ``isinstance(excinfo.value, exc)`` instead. 681 """ 682 return isinstance(self.value, exc) 683 684 def _getreprcrash(self) -> ReprFileLocation | None: 685 # Find last non-hidden traceback entry that led to the exception of the 686 # traceback, or None if all hidden. 
        # Tail of _getreprcrash(): walk the traceback from the innermost
        # entry outwards looking for the first non-hidden frame to report
        # as the crash location.
        for i in range(-1, -len(self.traceback) - 1, -1):
            entry = self.traceback[i]
            if not entry.ishidden(self):
                path, lineno = entry.frame.code.raw.co_filename, entry.lineno
                exconly = self.exconly(tryshort=True)
                # entry.lineno is 0-based; report a 1-based line number.
                return ReprFileLocation(path, lineno + 1, exconly)
        # Every entry is hidden: there is no crash location to report.
        return None

    def getrepr(
        self,
        showlocals: bool = False,
        style: TracebackStyle = "long",
        abspath: bool = False,
        tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback] = True,
        funcargs: bool = False,
        truncate_locals: bool = True,
        truncate_args: bool = True,
        chain: bool = True,
    ) -> ReprExceptionInfo | ExceptionChainRepr:
        """Return str()able representation of this exception info.

        :param bool showlocals:
            Show locals per traceback entry.
            Ignored if ``style=="native"``.

        :param str style:
            long|short|line|no|native|value traceback style.

        :param bool abspath:
            If paths should be changed to absolute or left unchanged.

        :param tbfilter:
            A filter for traceback entries.

            * If false, don't hide any entries.
            * If true, hide internal entries and entries that contain a local
              variable ``__tracebackhide__ = True``.
            * If a callable, delegates the filtering to the callable.

            Ignored if ``style`` is ``"native"``.

        :param bool funcargs:
            Show fixtures ("funcargs" for legacy purposes) per traceback entry.

        :param bool truncate_locals:
            With ``showlocals==True``, make sure locals can be safely represented as strings.

        :param bool truncate_args:
            With ``funcargs==True``, make sure args can be safely represented as strings.

        :param bool chain:
            If chained exceptions in Python 3 should be shown.

        .. versionchanged:: 3.9

            Added the ``chain`` parameter.
        """
        if style == "native":
            # Native style delegates all formatting to the stdlib traceback
            # module; the FormattedExcinfo options below do not apply.
            return ReprExceptionInfo(
                reprtraceback=ReprTracebackNative(
                    format_exception(
                        self.type,
                        self.value,
                        self.traceback[0]._rawentry if self.traceback else None,
                    )
                ),
                reprcrash=self._getreprcrash(),
            )

        fmt = FormattedExcinfo(
            showlocals=showlocals,
            style=style,
            abspath=abspath,
            tbfilter=tbfilter,
            funcargs=funcargs,
            truncate_locals=truncate_locals,
            truncate_args=truncate_args,
            chain=chain,
        )
        return fmt.repr_excinfo(self)

    def match(self, regexp: str | re.Pattern[str]) -> Literal[True]:
        """Check whether the regular expression `regexp` matches the string
        representation of the exception using :func:`python:re.search`.

        If it matches `True` is returned, otherwise an `AssertionError` is raised.
        """
        __tracebackhide__ = True
        value = stringify_exception(self.value)
        msg = (
            f"Regex pattern did not match.\n"
            f" Expected regex: {regexp!r}\n"
            f" Actual message: {value!r}"
        )
        # A pattern identical to the message usually means the caller passed
        # a literal string containing regex metacharacters without escaping.
        if regexp == value:
            msg += "\n Did you mean to `re.escape()` the regex?"
        assert re.search(regexp, value), msg
        # Return True to allow for "assert excinfo.match()".
        return True

    def _group_contains(
        self,
        exc_group: BaseExceptionGroup[BaseException],
        expected_exception: EXCEPTION_OR_MORE,
        match: str | re.Pattern[str] | None,
        target_depth: int | None = None,
        current_depth: int = 1,
    ) -> bool:
        """Return `True` if a `BaseExceptionGroup` contains a matching exception."""
        if (target_depth is not None) and (current_depth > target_depth):
            # already descended past the target depth
            return False
        for exc in exc_group.exceptions:
            if isinstance(exc, BaseExceptionGroup):
                # Recurse into nested groups; a match at any allowed depth wins.
                if self._group_contains(
                    exc, expected_exception, match, target_depth, current_depth + 1
                ):
                    return True
            if (target_depth is not None) and (current_depth != target_depth):
                # not at the target depth, no match
                continue
            if not isinstance(exc, expected_exception):
                continue
            if match is not None:
                value = stringify_exception(exc)
                if not re.search(match, value):
                    continue
            return True
        return False

    def group_contains(
        self,
        expected_exception: EXCEPTION_OR_MORE,
        *,
        match: str | re.Pattern[str] | None = None,
        depth: int | None = None,
    ) -> bool:
        """Check whether a captured exception group contains a matching exception.

        :param Type[BaseException] | Tuple[Type[BaseException]] expected_exception:
            The expected exception type, or a tuple if one of multiple possible
            exception types are expected.

        :param str | re.Pattern[str] | None match:
            If specified, a string containing a regular expression,
            or a regular expression object, that is tested against the string
            representation of the exception and its `PEP-678 <https://peps.python.org/pep-0678/>`__
            ``__notes__`` using :func:`re.search`.

            To match a literal string that may contain :ref:`special characters
            <re-syntax>`, the pattern can first be escaped with :func:`re.escape`.

        :param Optional[int] depth:
            If `None`, will search for a matching exception at any nesting depth.
            If >= 1, will only match an exception if it's at the specified depth (depth = 1 being
            the exceptions contained within the topmost exception group).

        .. versionadded:: 8.0

        .. warning::
            This helper makes it easy to check for the presence of specific exceptions,
            but it is very bad for checking that the group does *not* contain
            *any other exceptions*.
            You should instead consider using :class:`pytest.RaisesGroup`

        """
        msg = "Captured exception is not an instance of `BaseExceptionGroup`"
        assert isinstance(self.value, BaseExceptionGroup), msg
        msg = "`depth` must be >= 1 if specified"
        assert (depth is None) or (depth >= 1), msg
        return self._group_contains(self.value, expected_exception, match, depth)
Wraps sys.exc_info() objects and offers help for navigating the traceback.
506 def __init__( 507 self, 508 excinfo: tuple[type[E], E, TracebackType] | None, 509 striptext: str = "", 510 traceback: Traceback | None = None, 511 *, 512 _ispytest: bool = False, 513 ) -> None: 514 check_ispytest(_ispytest) 515 self._excinfo = excinfo 516 self._striptext = striptext 517 self._traceback = traceback
519 @classmethod 520 def from_exception( 521 cls, 522 # Ignoring error: "Cannot use a covariant type variable as a parameter". 523 # This is OK to ignore because this class is (conceptually) readonly. 524 # See https://github.com/python/mypy/issues/7049. 525 exception: E, # type: ignore[misc] 526 exprinfo: str | None = None, 527 ) -> ExceptionInfo[E]: 528 """Return an ExceptionInfo for an existing exception. 529 530 The exception must have a non-``None`` ``__traceback__`` attribute, 531 otherwise this function fails with an assertion error. This means that 532 the exception must have been raised, or added a traceback with the 533 :py:meth:`~BaseException.with_traceback()` method. 534 535 :param exprinfo: 536 A text string helping to determine if we should strip 537 ``AssertionError`` from the output. Defaults to the exception 538 message/``__str__()``. 539 540 .. versionadded:: 7.4 541 """ 542 assert exception.__traceback__, ( 543 "Exceptions passed to ExcInfo.from_exception(...)" 544 " must have a non-None __traceback__." 545 ) 546 exc_info = (type(exception), exception, exception.__traceback__) 547 return cls.from_exc_info(exc_info, exprinfo)
Return an ExceptionInfo for an existing exception.
The exception must have a non-None __traceback__ attribute,
otherwise this function fails with an assertion error. This means that
the exception must have been raised, or added a traceback with the
~BaseException.with_traceback() method.
Parameters
- exprinfo:
A text string helping to determine if we should strip
AssertionError from the output. Defaults to the exception message/__str__().
New in version 7.4.
549 @classmethod 550 def from_exc_info( 551 cls, 552 exc_info: tuple[type[E], E, TracebackType], 553 exprinfo: str | None = None, 554 ) -> ExceptionInfo[E]: 555 """Like :func:`from_exception`, but using old-style exc_info tuple.""" 556 _striptext = "" 557 if exprinfo is None and isinstance(exc_info[1], AssertionError): 558 exprinfo = getattr(exc_info[1], "msg", None) 559 if exprinfo is None: 560 exprinfo = saferepr(exc_info[1]) 561 if exprinfo and exprinfo.startswith(cls._assert_start_repr): 562 _striptext = "AssertionError: " 563 564 return cls(exc_info, _striptext, _ispytest=True)
Like from_exception(), but using old-style exc_info tuple.
566 @classmethod 567 def from_current(cls, exprinfo: str | None = None) -> ExceptionInfo[BaseException]: 568 """Return an ExceptionInfo matching the current traceback. 569 570 .. warning:: 571 572 Experimental API 573 574 :param exprinfo: 575 A text string helping to determine if we should strip 576 ``AssertionError`` from the output. Defaults to the exception 577 message/``__str__()``. 578 """ 579 tup = sys.exc_info() 580 assert tup[0] is not None, "no current exception" 581 assert tup[1] is not None, "no current exception" 582 assert tup[2] is not None, "no current exception" 583 exc_info = (tup[0], tup[1], tup[2]) 584 return ExceptionInfo.from_exc_info(exc_info, exprinfo)
Return an ExceptionInfo matching the current traceback.
Experimental API
Parameters
- exprinfo:
A text string helping to determine if we should strip
AssertionError from the output. Defaults to the exception message/__str__().
586 @classmethod 587 def for_later(cls) -> ExceptionInfo[E]: 588 """Return an unfilled ExceptionInfo.""" 589 return cls(None, _ispytest=True)
Return an unfilled ExceptionInfo.
591 def fill_unfilled(self, exc_info: tuple[type[E], E, TracebackType]) -> None: 592 """Fill an unfilled ExceptionInfo created with ``for_later()``.""" 593 assert self._excinfo is None, "ExceptionInfo was already filled" 594 self._excinfo = exc_info
Fill an unfilled ExceptionInfo created with for_later().
596 @property 597 def type(self) -> type[E]: 598 """The exception class.""" 599 assert self._excinfo is not None, ( 600 ".type can only be used after the context manager exits" 601 ) 602 return self._excinfo[0]
The exception class.
604 @property 605 def value(self) -> E: 606 """The exception value.""" 607 assert self._excinfo is not None, ( 608 ".value can only be used after the context manager exits" 609 ) 610 return self._excinfo[1]
The exception value.
612 @property 613 def tb(self) -> TracebackType: 614 """The exception raw traceback.""" 615 assert self._excinfo is not None, ( 616 ".tb can only be used after the context manager exits" 617 ) 618 return self._excinfo[2]
The exception raw traceback.
620 @property 621 def typename(self) -> str: 622 """The type name of the exception.""" 623 assert self._excinfo is not None, ( 624 ".typename can only be used after the context manager exits" 625 ) 626 return self.type.__name__
The type name of the exception.
628 @property 629 def traceback(self) -> Traceback: 630 """The traceback.""" 631 if self._traceback is None: 632 self._traceback = Traceback(self.tb) 633 return self._traceback
The traceback.
644 def exconly(self, tryshort: bool = False) -> str: 645 """Return the exception as a string. 646 647 When 'tryshort' resolves to True, and the exception is an 648 AssertionError, only the actual exception part of the exception 649 representation is returned (so 'AssertionError: ' is removed from 650 the beginning). 651 """ 652 653 def _get_single_subexc( 654 eg: BaseExceptionGroup[BaseException], 655 ) -> BaseException | None: 656 if len(eg.exceptions) != 1: 657 return None 658 if isinstance(e := eg.exceptions[0], BaseExceptionGroup): 659 return _get_single_subexc(e) 660 return e 661 662 if ( 663 tryshort 664 and isinstance(self.value, BaseExceptionGroup) 665 and (subexc := _get_single_subexc(self.value)) is not None 666 ): 667 return f"{subexc!r} [single exception in {type(self.value).__name__}]" 668 669 lines = format_exception_only(self.type, self.value) 670 text = "".join(lines) 671 text = text.rstrip() 672 if tryshort: 673 if text.startswith(self._striptext): 674 text = text[len(self._striptext) :] 675 return text
Return the exception as a string.
When 'tryshort' resolves to True, and the exception is an AssertionError, only the actual exception part of the exception representation is returned (so 'AssertionError: ' is removed from the beginning).
677 def errisinstance(self, exc: EXCEPTION_OR_MORE) -> bool: 678 """Return True if the exception is an instance of exc. 679 680 Consider using ``isinstance(excinfo.value, exc)`` instead. 681 """ 682 return isinstance(self.value, exc)
Return True if the exception is an instance of exc.
Consider using isinstance(excinfo.value, exc) instead.
    def getrepr(
        self,
        showlocals: bool = False,
        style: TracebackStyle = "long",
        abspath: bool = False,
        tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback] = True,
        funcargs: bool = False,
        truncate_locals: bool = True,
        truncate_args: bool = True,
        chain: bool = True,
    ) -> ReprExceptionInfo | ExceptionChainRepr:
        """Return str()able representation of this exception info.

        :param bool showlocals:
            Show locals per traceback entry.
            Ignored if ``style=="native"``.

        :param str style:
            long|short|line|no|native|value traceback style.

        :param bool abspath:
            If paths should be changed to absolute or left unchanged.

        :param tbfilter:
            A filter for traceback entries.

            * If false, don't hide any entries.
            * If true, hide internal entries and entries that contain a local
              variable ``__tracebackhide__ = True``.
            * If a callable, delegates the filtering to the callable.

            Ignored if ``style`` is ``"native"``.

        :param bool funcargs:
            Show fixtures ("funcargs" for legacy purposes) per traceback entry.

        :param bool truncate_locals:
            With ``showlocals==True``, make sure locals can be safely represented as strings.

        :param bool truncate_args:
            With ``funcargs==True``, make sure args can be safely represented as strings.

        :param bool chain:
            If chained exceptions in Python 3 should be shown.

        .. versionchanged:: 3.9

            Added the ``chain`` parameter.
        """
        if style == "native":
            # Native style delegates all formatting to the stdlib traceback
            # module; the FormattedExcinfo options below do not apply.
            return ReprExceptionInfo(
                reprtraceback=ReprTracebackNative(
                    format_exception(
                        self.type,
                        self.value,
                        self.traceback[0]._rawentry if self.traceback else None,
                    )
                ),
                reprcrash=self._getreprcrash(),
            )

        fmt = FormattedExcinfo(
            showlocals=showlocals,
            style=style,
            abspath=abspath,
            tbfilter=tbfilter,
            funcargs=funcargs,
            truncate_locals=truncate_locals,
            truncate_args=truncate_args,
            chain=chain,
        )
        return fmt.repr_excinfo(self)
Return str()able representation of this exception info.
Parameters
bool showlocals: Show locals per traceback entry. Ignored if style=="native".
str style: long|short|line|no|native|value traceback style.
bool abspath: If paths should be changed to absolute or left unchanged.
tbfilter: A filter for traceback entries.
- If false, don't hide any entries.
- If true, hide internal entries and entries that contain a local variable __tracebackhide__ = True.
- If a callable, delegates the filtering to the callable.
Ignored if style is "native".
bool funcargs: Show fixtures ("funcargs" for legacy purposes) per traceback entry.
bool truncate_locals: With showlocals==True, make sure locals can be safely represented as strings.
bool truncate_args: With funcargs==True, make sure args can be safely represented as strings.
bool chain: If chained exceptions in Python 3 should be shown.
Changed in version 3.9:
Added the chain parameter.
768 def match(self, regexp: str | re.Pattern[str]) -> Literal[True]: 769 """Check whether the regular expression `regexp` matches the string 770 representation of the exception using :func:`python:re.search`. 771 772 If it matches `True` is returned, otherwise an `AssertionError` is raised. 773 """ 774 __tracebackhide__ = True 775 value = stringify_exception(self.value) 776 msg = ( 777 f"Regex pattern did not match.\n" 778 f" Expected regex: {regexp!r}\n" 779 f" Actual message: {value!r}" 780 ) 781 if regexp == value: 782 msg += "\n Did you mean to `re.escape()` the regex?" 783 assert re.search(regexp, value), msg 784 # Return True to allow for "assert excinfo.match()". 785 return True
Check whether the regular expression regexp matches the string
representation of the exception using re.search().
If it matches True is returned, otherwise an AssertionError is raised.
817 def group_contains( 818 self, 819 expected_exception: EXCEPTION_OR_MORE, 820 *, 821 match: str | re.Pattern[str] | None = None, 822 depth: int | None = None, 823 ) -> bool: 824 """Check whether a captured exception group contains a matching exception. 825 826 :param Type[BaseException] | Tuple[Type[BaseException]] expected_exception: 827 The expected exception type, or a tuple if one of multiple possible 828 exception types are expected. 829 830 :param str | re.Pattern[str] | None match: 831 If specified, a string containing a regular expression, 832 or a regular expression object, that is tested against the string 833 representation of the exception and its `PEP-678 <https://peps.python.org/pep-0678/>` `__notes__` 834 using :func:`re.search`. 835 836 To match a literal string that may contain :ref:`special characters 837 <re-syntax>`, the pattern can first be escaped with :func:`re.escape`. 838 839 :param Optional[int] depth: 840 If `None`, will search for a matching exception at any nesting depth. 841 If >= 1, will only match an exception if it's at the specified depth (depth = 1 being 842 the exceptions contained within the topmost exception group). 843 844 .. versionadded:: 8.0 845 846 .. warning:: 847 This helper makes it easy to check for the presence of specific exceptions, 848 but it is very bad for checking that the group does *not* contain 849 *any other exceptions*. 850 You should instead consider using :class:`pytest.RaisesGroup` 851 852 """ 853 msg = "Captured exception is not an instance of `BaseExceptionGroup`" 854 assert isinstance(self.value, BaseExceptionGroup), msg 855 msg = "`depth` must be >= 1 if specified" 856 assert (depth is None) or (depth >= 1), msg 857 return self._group_contains(self.value, expected_exception, match, depth)
Check whether a captured exception group contains a matching exception.
Parameters
Type[BaseException] | Tuple[Type[BaseException]] expected_exception: The expected exception type, or a tuple if one of multiple possible exception types are expected.
str | re.Pattern[str] | None match: If specified, a string containing a regular expression, or a regular expression object, that is tested against the string representation of the exception and its PEP-678 __notes__ (https://peps.python.org/pep-0678/) using re.search(). To match a literal string that may contain special characters, the pattern can first be escaped with re.escape().
Optional[int] depth: If None, will search for a matching exception at any nesting depth. If >= 1, will only match an exception if it's at the specified depth (depth = 1 being the exceptions contained within the topmost exception group).
New in version 8.0.
This helper makes it easy to check for the presence of specific exceptions,
but it is very bad for checking that the group does not contain
any other exceptions.
You should instead consider using pytest.RaisesGroup
98__all__ = [ 99 "HIDDEN_PARAM", 100 "Cache", 101 "CallInfo", 102 "CaptureFixture", 103 "Class", 104 "CollectReport", 105 "Collector", 106 "Config", 107 "Dir", 108 "Directory", 109 "DoctestItem", 110 "ExceptionInfo", 111 "ExitCode", 112 "File", 113 "FixtureDef", 114 "FixtureLookupError", 115 "FixtureRequest", 116 "Function", 117 "HookRecorder", 118 "Item", 119 "LineMatcher", 120 "LogCaptureFixture", 121 "Mark", 122 "MarkDecorator", 123 "MarkGenerator", 124 "Metafunc", 125 "Module", 126 "MonkeyPatch", 127 "OptionGroup", 128 "Package", 129 "Parser", 130 "PytestAssertRewriteWarning", 131 "PytestCacheWarning", 132 "PytestCollectionWarning", 133 "PytestConfigWarning", 134 "PytestDeprecationWarning", 135 "PytestExperimentalApiWarning", 136 "PytestFDWarning", 137 "PytestPluginManager", 138 "PytestRemovedIn9Warning", 139 "PytestRemovedIn10Warning", 140 "PytestReturnNotNoneWarning", 141 "PytestUnhandledThreadExceptionWarning", 142 "PytestUnknownMarkWarning", 143 "PytestUnraisableExceptionWarning", 144 "PytestWarning", 145 "Pytester", 146 "RaisesExc", 147 "RaisesGroup", 148 "RecordedHookCall", 149 "RunResult", 150 "Session", 151 "Stash", 152 "StashKey", 153 "SubtestReport", 154 "Subtests", 155 "TempPathFactory", 156 "TempdirFactory", 157 "TerminalReporter", 158 "TestReport", 159 "TestShortLogReport", 160 "Testdir", 161 "UsageError", 162 "WarningsRecorder", 163 "__version__", 164 "approx", 165 "cmdline", 166 "console_main", 167 "deprecated_call", 168 "exit", 169 "fail", 170 "fixture", 171 "freeze_includes", 172 "hookimpl", 173 "hookspec", 174 "importorskip", 175 "main", 176 "mark", 177 "param", 178 "raises", 179 "register_assert_rewrite", 180 "set_trace", 181 "skip", 182 "version_tuple", 183 "warns", 184 "xfail", 185 "yield_fixture", 186]
Encodes the valid exit codes used by pytest.
Currently users and plugins may supply other exit codes as well.
New in version 5.0.
631class File(FSCollector, abc.ABC): 632 """Base class for collecting tests from a file. 633 634 :ref:`non-python tests`. 635 """
Base class for collecting tests from a file.
See the documentation on non-python tests.
class FixtureDef(Generic[FixtureValue]):
    """A container for a fixture definition.

    Note: At this time, only explicitly documented fields and methods are
    considered public stable API.
    """

    def __init__(
        self,
        config: Config,
        baseid: str | None,
        argname: str,
        func: _FixtureFunc[FixtureValue],
        scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] | None,
        params: Sequence[object] | None,
        ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None,
        *,
        _ispytest: bool = False,
        # only used in a deprecationwarning msg, can be removed in pytest9
        _autouse: bool = False,
    ) -> None:
        check_ispytest(_ispytest)
        # The "base" node ID for the fixture.
        #
        # This is a node ID prefix. A fixture is only available to a node (e.g.
        # a `Function` item) if the fixture's baseid is a nodeid of a parent of
        # node.
        #
        # For a fixture found in a Collector's object (e.g. a `Module`s module,
        # a `Class`'s class), the baseid is the Collector's nodeid.
        #
        # For a fixture found in a conftest plugin, the baseid is the conftest's
        # directory path relative to the rootdir.
        #
        # For other plugins, the baseid is the empty string (always matches).
        self.baseid: Final = baseid or ""
        # Whether the fixture was found from a node or a conftest in the
        # collection tree. Will be false for fixtures defined in non-conftest
        # plugins.
        self.has_location: Final = baseid is not None
        # The fixture factory function.
        self.func: Final = func
        # The name by which the fixture may be requested.
        self.argname: Final = argname
        if scope is None:
            scope = Scope.Function
        elif callable(scope):
            # Dynamic scope: resolve the callable to a concrete scope name now.
            scope = _eval_scope_callable(scope, argname, config)
        if isinstance(scope, str):
            scope = Scope.from_user(
                scope, descr=f"Fixture '{func.__name__}'", where=baseid
            )
        self._scope: Final = scope
        # If the fixture is directly parametrized, the parameter values.
        self.params: Final = params
        # If the fixture is directly parametrized, a tuple of explicit IDs to
        # assign to the parameter values, or a callable to generate an ID given
        # a parameter value.
        self.ids: Final = ids
        # The names requested by the fixtures.
        self.argnames: Final = getfuncargnames(func, name=argname)
        # If the fixture was executed, the current value of the fixture.
        # Can change if the fixture is executed with different parameters.
        self.cached_result: _FixtureCachedResult[FixtureValue] | None = None
        self._finalizers: Final[list[Callable[[], object]]] = []

        # only used to emit a deprecationwarning, can be removed in pytest9
        self._autouse = _autouse

    @property
    def scope(self) -> _ScopeName:
        """Scope string, one of "function", "class", "module", "package", "session"."""
        return self._scope.value

    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
        # Register a callable to run (LIFO) when this fixture is torn down.
        self._finalizers.append(finalizer)

    def finish(self, request: SubRequest) -> None:
        # Tear down: run all finalizers LIFO, collecting failures instead of
        # stopping at the first, then notify plugins and invalidate caches.
        exceptions: list[BaseException] = []
        while self._finalizers:
            fin = self._finalizers.pop()
            try:
                fin()
            except BaseException as e:
                exceptions.append(e)
        node = request.node
        node.ihook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
        # Even if finalization fails, we invalidate the cached fixture
        # value and remove all finalizers because they may be bound methods
        # which will keep instances alive.
        self.cached_result = None
        self._finalizers.clear()
        if len(exceptions) == 1:
            raise exceptions[0]
        elif len(exceptions) > 1:
            msg = f'errors while tearing down fixture "{self.argname}" of {node}'
            raise BaseExceptionGroup(msg, exceptions[::-1])

    def execute(self, request: SubRequest) -> FixtureValue:
        """Return the value of this fixture, executing it if not cached."""
        # Ensure that the dependent fixtures requested by this fixture are loaded.
        # This needs to be done before checking if we have a cached value, since
        # if a dependent fixture has their cache invalidated, e.g. due to
        # parametrization, they finalize themselves and fixtures depending on it
        # (which will likely include this fixture) setting `self.cached_result = None`.
        # See #4871
        requested_fixtures_that_should_finalize_us = []
        for argname in self.argnames:
            fixturedef = request._get_active_fixturedef(argname)
            # Saves requested fixtures in a list so we later can add our finalizer
            # to them, ensuring that if a requested fixture gets torn down we get torn
            # down first. This is generally handled by SetupState, but still currently
            # needed when this fixture is not parametrized but depends on a parametrized
            # fixture.
            requested_fixtures_that_should_finalize_us.append(fixturedef)

        # Check for (and return) cached value/exception.
        if self.cached_result is not None:
            request_cache_key = self.cache_key(request)
            cache_key = self.cached_result[1]
            try:
                # Attempt to make a normal == check: this might fail for objects
                # which do not implement the standard comparison (like numpy arrays -- #6497).
                cache_hit = bool(request_cache_key == cache_key)
            except (ValueError, RuntimeError):
                # If the comparison raises, use 'is' as fallback.
                cache_hit = request_cache_key is cache_key

            if cache_hit:
                if self.cached_result[2] is not None:
                    # The cached run raised: re-raise the same exception.
                    exc, exc_tb = self.cached_result[2]
                    raise exc.with_traceback(exc_tb)
                else:
                    return self.cached_result[0]
            # We have a previous but differently parametrized fixture instance
            # so we need to tear it down before creating a new one.
            self.finish(request)
            assert self.cached_result is None

        # Add finalizer to requested fixtures we saved previously.
        # We make sure to do this after checking for cached value to avoid
        # adding our finalizer multiple times. (#12135)
        finalizer = functools.partial(self.finish, request=request)
        for parent_fixture in requested_fixtures_that_should_finalize_us:
            parent_fixture.addfinalizer(finalizer)

        ihook = request.node.ihook
        try:
            # Setup the fixture, run the code in it, and cache the value
            # in self.cached_result.
            result: FixtureValue = ihook.pytest_fixture_setup(
                fixturedef=self, request=request
            )
        finally:
            # Schedule our finalizer, even if the setup failed.
            request.node.addfinalizer(finalizer)

        return result

    def cache_key(self, request: SubRequest) -> object:
        # Fixtures are cached per parametrization value (None when absent).
        return getattr(request, "param", None)

    def __repr__(self) -> str:
        return f"<FixtureDef argname={self.argname!r} scope={self.scope!r} baseid={self.baseid!r}>"
A container for a fixture definition.
Note: At this time, only explicitly documented fields and methods are considered public stable API.
    def __init__(
        self,
        config: Config,
        baseid: str | None,
        argname: str,
        func: _FixtureFunc[FixtureValue],
        scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] | None,
        params: Sequence[object] | None,
        ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None,
        *,
        _ispytest: bool = False,
        # only used in a deprecationwarning msg, can be removed in pytest9
        _autouse: bool = False,
    ) -> None:
        """Initialize a fixture definition (internal; created by pytest)."""
        check_ispytest(_ispytest)
        # The "base" node ID for the fixture.
        #
        # This is a node ID prefix. A fixture is only available to a node (e.g.
        # a `Function` item) if the fixture's baseid is a nodeid of a parent of
        # node.
        #
        # For a fixture found in a Collector's object (e.g. a `Module`s module,
        # a `Class`'s class), the baseid is the Collector's nodeid.
        #
        # For a fixture found in a conftest plugin, the baseid is the conftest's
        # directory path relative to the rootdir.
        #
        # For other plugins, the baseid is the empty string (always matches).
        self.baseid: Final = baseid or ""
        # Whether the fixture was found from a node or a conftest in the
        # collection tree. Will be false for fixtures defined in non-conftest
        # plugins.
        self.has_location: Final = baseid is not None
        # The fixture factory function.
        self.func: Final = func
        # The name by which the fixture may be requested.
        self.argname: Final = argname
        if scope is None:
            scope = Scope.Function
        elif callable(scope):
            # Dynamic scope: resolve the callable to a concrete scope name now.
            scope = _eval_scope_callable(scope, argname, config)
        if isinstance(scope, str):
            scope = Scope.from_user(
                scope, descr=f"Fixture '{func.__name__}'", where=baseid
            )
        self._scope: Final = scope
        # If the fixture is directly parametrized, the parameter values.
        self.params: Final = params
        # If the fixture is directly parametrized, a tuple of explicit IDs to
        # assign to the parameter values, or a callable to generate an ID given
        # a parameter value.
        self.ids: Final = ids
        # The names requested by the fixtures.
        self.argnames: Final = getfuncargnames(func, name=argname)
        # If the fixture was executed, the current value of the fixture.
        # Can change if the fixture is executed with different parameters.
        self.cached_result: _FixtureCachedResult[FixtureValue] | None = None
        self._finalizers: Final[list[Callable[[], object]]] = []

        # only used to emit a deprecationwarning, can be removed in pytest9
        self._autouse = _autouse
1030 @property 1031 def scope(self) -> _ScopeName: 1032 """Scope string, one of "function", "class", "module", "package", "session".""" 1033 return self._scope.value
Scope string, one of "function", "class", "module", "package", "session".
1038 def finish(self, request: SubRequest) -> None: 1039 exceptions: list[BaseException] = [] 1040 while self._finalizers: 1041 fin = self._finalizers.pop() 1042 try: 1043 fin() 1044 except BaseException as e: 1045 exceptions.append(e) 1046 node = request.node 1047 node.ihook.pytest_fixture_post_finalizer(fixturedef=self, request=request) 1048 # Even if finalization fails, we invalidate the cached fixture 1049 # value and remove all finalizers because they may be bound methods 1050 # which will keep instances alive. 1051 self.cached_result = None 1052 self._finalizers.clear() 1053 if len(exceptions) == 1: 1054 raise exceptions[0] 1055 elif len(exceptions) > 1: 1056 msg = f'errors while tearing down fixture "{self.argname}" of {node}' 1057 raise BaseExceptionGroup(msg, exceptions[::-1])
    def execute(self, request: SubRequest) -> FixtureValue:
        """Return the value of this fixture, executing it if not cached."""
        # Ensure that the dependent fixtures requested by this fixture are loaded.
        # This needs to be done before checking if we have a cached value, since
        # if a dependent fixture has their cache invalidated, e.g. due to
        # parametrization, they finalize themselves and fixtures depending on it
        # (which will likely include this fixture) setting `self.cached_result = None`.
        # See #4871
        requested_fixtures_that_should_finalize_us = []
        for argname in self.argnames:
            fixturedef = request._get_active_fixturedef(argname)
            # Saves requested fixtures in a list so we later can add our finalizer
            # to them, ensuring that if a requested fixture gets torn down we get torn
            # down first. This is generally handled by SetupState, but still currently
            # needed when this fixture is not parametrized but depends on a parametrized
            # fixture.
            requested_fixtures_that_should_finalize_us.append(fixturedef)

        # Check for (and return) cached value/exception.
        if self.cached_result is not None:
            request_cache_key = self.cache_key(request)
            cache_key = self.cached_result[1]
            try:
                # Attempt to make a normal == check: this might fail for objects
                # which do not implement the standard comparison (like numpy arrays -- #6497).
                cache_hit = bool(request_cache_key == cache_key)
            except (ValueError, RuntimeError):
                # If the comparison raises, use 'is' as fallback.
                cache_hit = request_cache_key is cache_key

            if cache_hit:
                if self.cached_result[2] is not None:
                    # The cached run raised: re-raise the same exception.
                    exc, exc_tb = self.cached_result[2]
                    raise exc.with_traceback(exc_tb)
                else:
                    return self.cached_result[0]
            # We have a previous but differently parametrized fixture instance
            # so we need to tear it down before creating a new one.
            self.finish(request)
            assert self.cached_result is None

        # Add finalizer to requested fixtures we saved previously.
        # We make sure to do this after checking for cached value to avoid
        # adding our finalizer multiple times. (#12135)
        finalizer = functools.partial(self.finish, request=request)
        for parent_fixture in requested_fixtures_that_should_finalize_us:
            parent_fixture.addfinalizer(finalizer)

        ihook = request.node.ihook
        try:
            # Setup the fixture, run the code in it, and cache the value
            # in self.cached_result.
            result: FixtureValue = ihook.pytest_fixture_setup(
                fixturedef=self, request=request
            )
        finally:
            # Schedule our finalizer, even if the setup failed.
            request.node.addfinalizer(finalizer)

        return result
Return the value of this fixture, executing it if not cached.
@final
class FixtureLookupError(LookupError):
    """Could not return a requested fixture (missing or invalid)."""

    def __init__(
        self, argname: str | None, request: FixtureRequest, msg: str | None = None
    ) -> None:
        # Name of the fixture that failed to resolve; None when raised via
        # `request.raiseerror(msg)`.
        self.argname = argname
        self.request = request
        # Chain of FixtureDefs that were being evaluated when the lookup failed.
        self.fixturestack = request._get_fixturestack()
        # Optional custom message; when None, formatrepr() synthesizes one.
        self.msg = msg

    def formatrepr(self) -> FixtureLookupErrorRepr:
        """Build a terminal-friendly representation of this lookup error."""
        tblines: list[str] = []
        addline = tblines.append
        # Source functions to show: the test function itself plus every
        # fixture function in the resolution stack.
        stack = [self.request._pyfuncitem.obj]
        stack.extend(map(lambda x: x.func, self.fixturestack))
        msg = self.msg
        # This function currently makes an assumption that a non-None msg means we
        # have a non-empty `self.fixturestack`. This is currently true, but if
        # somebody at some point want to extend the use of FixtureLookupError to
        # new cases it might break.
        # Add the assert to make it clearer to developer that this will fail, otherwise
        # it crashes because `fspath` does not get set due to `stack` being empty.
        assert self.msg is None or self.fixturestack, (
            "formatrepr assumptions broken, rewrite it to handle it"
        )
        if msg is not None:
            # The last fixture raised an error, let's present
            # it at the requesting side.
            stack = stack[:-1]
        for function in stack:
            fspath, lineno = getfslineno(function)
            try:
                lines, _ = inspect.getsourcelines(get_real_func(function))
            except (OSError, IndexError, TypeError):
                error_msg = "file %s, line %s: source code not available"
                addline(error_msg % (fspath, lineno + 1))
            else:
                addline(f"file {fspath}, line {lineno + 1}")
                # Show source up to and including the `def` line.
                for i, line in enumerate(lines):
                    line = line.rstrip()
                    addline(" " + line)
                    if line.lstrip().startswith("def"):
                        break

        if msg is None:
            # Synthesize a message listing the fixtures that *are* available
            # for the requesting item.
            fm = self.request._fixturemanager
            available = set()
            parent = self.request._pyfuncitem.parent
            assert parent is not None
            for name, fixturedefs in fm._arg2fixturedefs.items():
                faclist = list(fm._matchfactories(fixturedefs, parent))
                if faclist:
                    available.add(name)
            if self.argname in available:
                msg = (
                    f" recursive dependency involving fixture '{self.argname}' detected"
                )
            else:
                msg = f"fixture '{self.argname}' not found"
            msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
            msg += "\n use 'pytest --fixtures [testpath]' for help on them."

        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
Could not return a requested fixture (missing or invalid).
def formatrepr(self) -> FixtureLookupErrorRepr:
    """Build a terminal-friendly representation of this lookup error."""
    tblines: list[str] = []
    addline = tblines.append
    # Source functions to show: the test function itself plus every
    # fixture function in the resolution stack.
    stack = [self.request._pyfuncitem.obj]
    stack.extend(map(lambda x: x.func, self.fixturestack))
    msg = self.msg
    # This function currently makes an assumption that a non-None msg means we
    # have a non-empty `self.fixturestack`. This is currently true, but if
    # somebody at some point want to extend the use of FixtureLookupError to
    # new cases it might break.
    # Add the assert to make it clearer to developer that this will fail, otherwise
    # it crashes because `fspath` does not get set due to `stack` being empty.
    assert self.msg is None or self.fixturestack, (
        "formatrepr assumptions broken, rewrite it to handle it"
    )
    if msg is not None:
        # The last fixture raised an error, let's present
        # it at the requesting side.
        stack = stack[:-1]
    for function in stack:
        fspath, lineno = getfslineno(function)
        try:
            lines, _ = inspect.getsourcelines(get_real_func(function))
        except (OSError, IndexError, TypeError):
            error_msg = "file %s, line %s: source code not available"
            addline(error_msg % (fspath, lineno + 1))
        else:
            addline(f"file {fspath}, line {lineno + 1}")
            # Show source up to and including the `def` line.
            for i, line in enumerate(lines):
                line = line.rstrip()
                addline(" " + line)
                if line.lstrip().startswith("def"):
                    break

    if msg is None:
        # Synthesize a message listing the fixtures that *are* available
        # for the requesting item.
        fm = self.request._fixturemanager
        available = set()
        parent = self.request._pyfuncitem.parent
        assert parent is not None
        for name, fixturedefs in fm._arg2fixturedefs.items():
            faclist = list(fm._matchfactories(fixturedefs, parent))
            if faclist:
                available.add(name)
        if self.argname in available:
            msg = (
                f" recursive dependency involving fixture '{self.argname}' detected"
            )
        else:
            msg = f"fixture '{self.argname}' not found"
        msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
        msg += "\n use 'pytest --fixtures [testpath]' for help on them."

    return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureRequest(abc.ABC):
    """The type of the ``request`` fixture.

    A request object gives access to the requesting test context and has a
    ``param`` attribute in case the fixture is parametrized.
    """

    def __init__(
        self,
        pyfuncitem: Function,
        fixturename: str | None,
        arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]],
        fixture_defs: dict[str, FixtureDef[Any]],
        *,
        _ispytest: bool = False,
    ) -> None:
        check_ispytest(_ispytest)
        #: Fixture for which this request is being performed.
        self.fixturename: Final = fixturename
        self._pyfuncitem: Final = pyfuncitem
        # The FixtureDefs for each fixture name requested by this item.
        # Starts from the statically-known fixturedefs resolved during
        # collection. Dynamically requested fixtures (using
        # `request.getfixturevalue("foo")`) are added dynamically.
        self._arg2fixturedefs: Final = arg2fixturedefs
        # The evaluated argnames so far, mapping to the FixtureDef they resolved
        # to.
        self._fixture_defs: Final = fixture_defs
        # Notes on the type of `param`:
        # -`request.param` is only defined in parametrized fixtures, and will raise
        #  AttributeError otherwise. Python typing has no notion of "undefined", so
        #  this cannot be reflected in the type.
        # - Technically `param` is only (possibly) defined on SubRequest, not
        #   FixtureRequest, but the typing of that is still in flux so this cheats.
        # - In the future we might consider using a generic for the param type, but
        #   for now just using Any.
        self.param: Any

    @property
    def _fixturemanager(self) -> FixtureManager:
        # The single FixtureManager lives on the session.
        return self._pyfuncitem.session._fixturemanager

    @property
    @abc.abstractmethod
    def _scope(self) -> Scope:
        # Abstract: concrete request types (Top/Sub) supply their scope.
        raise NotImplementedError()

    @property
    def scope(self) -> _ScopeName:
        """Scope string, one of "function", "class", "module", "package", "session"."""
        # `_scope` is a Scope enum; expose its plain string value.
        return self._scope.value

    @abc.abstractmethod
    def _check_scope(
        self,
        requested_fixturedef: FixtureDef[object],
        requested_scope: Scope,
    ) -> None:
        # Abstract: concrete request types validate scope compatibility.
        raise NotImplementedError()

    @property
    def fixturenames(self) -> list[str]:
        """Names of all active fixtures in this request."""
        # Statically-known names first, then any dynamically-resolved names
        # not already present.
        result = list(self._pyfuncitem.fixturenames)
        result.extend(set(self._fixture_defs).difference(result))
        return result

    @property
    @abc.abstractmethod
    def node(self):
        """Underlying collection node (depends on current request scope)."""
        raise NotImplementedError()

    @property
    def config(self) -> Config:
        """The pytest config object associated with this request."""
        return self._pyfuncitem.config

    @property
    def function(self):
        """Test function object if the request has a per-function scope."""
        if self.scope != "function":
            raise AttributeError(
                f"function not available in {self.scope}-scoped context"
            )
        return self._pyfuncitem.obj

    @property
    def cls(self):
        """Class (can be None) where the test function was collected."""
        if self.scope not in ("class", "function"):
            raise AttributeError(f"cls not available in {self.scope}-scoped context")
        clscol = self._pyfuncitem.getparent(_pytest.python.Class)
        # Implicitly returns None when there is no enclosing Class collector.
        if clscol:
            return clscol.obj

    @property
    def instance(self):
        """Instance (can be None) on which test function was collected."""
        if self.scope != "function":
            return None
        return getattr(self._pyfuncitem, "instance", None)

    @property
    def module(self):
        """Python module object where the test function was collected."""
        if self.scope not in ("function", "class", "module"):
            raise AttributeError(f"module not available in {self.scope}-scoped context")
        mod = self._pyfuncitem.getparent(_pytest.python.Module)
        assert mod is not None
        return mod.obj

    @property
    def path(self) -> Path:
        """Path where the test function was collected."""
        if self.scope not in ("function", "class", "module", "package"):
            raise AttributeError(f"path not available in {self.scope}-scoped context")
        return self._pyfuncitem.path

    @property
    def keywords(self) -> MutableMapping[str, Any]:
        """Keywords/markers dictionary for the underlying node."""
        node: nodes.Node = self.node
        return node.keywords

    @property
    def session(self) -> Session:
        """Pytest session object."""
        return self._pyfuncitem.session

    @abc.abstractmethod
    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
        """Add finalizer/teardown function to be called without arguments after
        the last test within the requesting test context finished execution."""
        raise NotImplementedError()

    def applymarker(self, marker: str | MarkDecorator) -> None:
        """Apply a marker to a single test function invocation.

        This method is useful if you don't want to have a keyword/marker
        on all function invocations.

        :param marker:
            An object created by a call to ``pytest.mark.NAME(...)``.
        """
        self.node.add_marker(marker)

    def raiseerror(self, msg: str | None) -> NoReturn:
        """Raise a FixtureLookupError exception.

        :param msg:
            An optional custom error message.
        """
        # argname=None: the error is not tied to a specific fixture name.
        raise FixtureLookupError(None, self, msg)

    def getfixturevalue(self, argname: str) -> Any:
        """Dynamically run a named fixture function.

        Declaring fixtures via function argument is recommended where possible.
        But if you can only decide whether to use another fixture at test
        setup time, you may use this function to retrieve it inside a fixture
        or test function body.

        This method can be used during the test setup phase or the test run
        phase, but during the test teardown phase a fixture's value may not
        be available.

        :param argname:
            The fixture name.
        :raises pytest.FixtureLookupError:
            If the given fixture could not be found.
        """
        # Note that in addition to the use case described in the docstring,
        # getfixturevalue() is also called by pytest itself during item and fixture
        # setup to evaluate the fixtures that are requested statically
        # (using function parameters, autouse, etc).

        fixturedef = self._get_active_fixturedef(argname)
        assert fixturedef.cached_result is not None, (
            f'The fixture value for "{argname}" is not available. '
            "This can happen when the fixture has already been torn down."
        )
        return fixturedef.cached_result[0]

    def _iter_chain(self) -> Iterator[SubRequest]:
        """Yield all SubRequests in the chain, from self up.

        Note: does *not* yield the TopRequest.
        """
        current = self
        while isinstance(current, SubRequest):
            yield current
            current = current._parent_request

    def _get_active_fixturedef(self, argname: str) -> FixtureDef[object]:
        """Resolve ``argname`` to a FixtureDef with a computed (cached) value."""
        if argname == "request":
            return RequestFixtureDef(self)

        # If we already finished computing a fixture by this name in this item,
        # return it.
        fixturedef = self._fixture_defs.get(argname)
        if fixturedef is not None:
            self._check_scope(fixturedef, fixturedef._scope)
            return fixturedef

        # Find the appropriate fixturedef.
        fixturedefs = self._arg2fixturedefs.get(argname, None)
        if fixturedefs is None:
            # We arrive here because of a dynamic call to
            # getfixturevalue(argname) which was naturally
            # not known at parsing/collection time.
            fixturedefs = self._fixturemanager.getfixturedefs(argname, self._pyfuncitem)
            if fixturedefs is not None:
                self._arg2fixturedefs[argname] = fixturedefs
        # No fixtures defined with this name.
        if fixturedefs is None:
            raise FixtureLookupError(argname, self)
        # There are no fixtures with this name applicable for the function.
        if not fixturedefs:
            raise FixtureLookupError(argname, self)

        # A fixture may override another fixture with the same name, e.g. a
        # fixture in a module can override a fixture in a conftest, a fixture in
        # a class can override a fixture in the module, and so on.
        # An overriding fixture can request its own name (possibly indirectly);
        # in this case it gets the value of the fixture it overrides, one level
        # up.
        # Check how many `argname`s deep we are, and take the next one.
        # `fixturedefs` is sorted from furthest to closest, so use negative
        # indexing to go in reverse.
        index = -1
        for request in self._iter_chain():
            if request.fixturename == argname:
                index -= 1
        # If already consumed all of the available levels, fail.
        if -index > len(fixturedefs):
            raise FixtureLookupError(argname, self)
        fixturedef = fixturedefs[index]

        # Prepare a SubRequest object for calling the fixture.
        try:
            callspec = self._pyfuncitem.callspec
        except AttributeError:
            callspec = None
        if callspec is not None and argname in callspec.params:
            param = callspec.params[argname]
            param_index = callspec.indices[argname]
            # The parametrize invocation scope overrides the fixture's scope.
            scope = callspec._arg2scope[argname]
        else:
            param = NOTSET
            param_index = 0
            scope = fixturedef._scope
            self._check_fixturedef_without_param(fixturedef)
        # The parametrize invocation scope only controls caching behavior while
        # allowing wider-scoped fixtures to keep depending on the parametrized
        # fixture. Scope control is enforced for parametrized fixtures
        # by recreating the whole fixture tree on parameter change.
        # Hence `fixturedef._scope`, not `scope`.
        self._check_scope(fixturedef, fixturedef._scope)
        subrequest = SubRequest(
            self, scope, param, param_index, fixturedef, _ispytest=True
        )

        # Make sure the fixture value is cached, running it if it isn't
        fixturedef.execute(request=subrequest)

        self._fixture_defs[argname] = fixturedef
        return fixturedef

    def _check_fixturedef_without_param(self, fixturedef: FixtureDef[object]) -> None:
        """Check that this request is allowed to execute this fixturedef without
        a param."""
        funcitem = self._pyfuncitem
        has_params = fixturedef.params is not None
        fixtures_not_supported = getattr(funcitem, "nofuncargs", False)
        if has_params and fixtures_not_supported:
            msg = (
                f"{funcitem.name} does not support fixtures, maybe unittest.TestCase subclass?\n"
                f"Node id: {funcitem.nodeid}\n"
                f"Function type: {type(funcitem).__name__}"
            )
            fail(msg, pytrace=False)
        if has_params:
            # Report where the param-less request came from, relative to
            # rootpath when possible.
            frame = inspect.stack()[3]
            frameinfo = inspect.getframeinfo(frame[0])
            source_path = absolutepath(frameinfo.filename)
            source_lineno = frameinfo.lineno
            try:
                source_path_str = str(source_path.relative_to(funcitem.config.rootpath))
            except ValueError:
                source_path_str = str(source_path)
            location = getlocation(fixturedef.func, funcitem.config.rootpath)
            msg = (
                "The requested fixture has no parameter defined for test:\n"
                f"    {funcitem.nodeid}\n\n"
                f"Requested fixture '{fixturedef.argname}' defined in:\n"
                f"{location}\n\n"
                f"Requested here:\n"
                f"{source_path_str}:{source_lineno}"
            )
            fail(msg, pytrace=False)

    def _get_fixturestack(self) -> list[FixtureDef[Any]]:
        # Chain is yielded innermost-first; reverse to outermost-first.
        values = [request._fixturedef for request in self._iter_chain()]
        values.reverse()
        return values
The type of the request fixture.
A request object gives access to the requesting test context and has a
param attribute in case the fixture is parametrized.
410 @property 411 def scope(self) -> _ScopeName: 412 """Scope string, one of "function", "class", "module", "package", "session".""" 413 return self._scope.value
Scope string, one of "function", "class", "module", "package", "session".
423 @property 424 def fixturenames(self) -> list[str]: 425 """Names of all active fixtures in this request.""" 426 result = list(self._pyfuncitem.fixturenames) 427 result.extend(set(self._fixture_defs).difference(result)) 428 return result
Names of all active fixtures in this request.
430 @property 431 @abc.abstractmethod 432 def node(self): 433 """Underlying collection node (depends on current request scope).""" 434 raise NotImplementedError()
Underlying collection node (depends on current request scope).
436 @property 437 def config(self) -> Config: 438 """The pytest config object associated with this request.""" 439 return self._pyfuncitem.config
The pytest config object associated with this request.
441 @property 442 def function(self): 443 """Test function object if the request has a per-function scope.""" 444 if self.scope != "function": 445 raise AttributeError( 446 f"function not available in {self.scope}-scoped context" 447 ) 448 return self._pyfuncitem.obj
Test function object if the request has a per-function scope.
450 @property 451 def cls(self): 452 """Class (can be None) where the test function was collected.""" 453 if self.scope not in ("class", "function"): 454 raise AttributeError(f"cls not available in {self.scope}-scoped context") 455 clscol = self._pyfuncitem.getparent(_pytest.python.Class) 456 if clscol: 457 return clscol.obj
Class (can be None) where the test function was collected.
459 @property 460 def instance(self): 461 """Instance (can be None) on which test function was collected.""" 462 if self.scope != "function": 463 return None 464 return getattr(self._pyfuncitem, "instance", None)
Instance (can be None) on which test function was collected.
466 @property 467 def module(self): 468 """Python module object where the test function was collected.""" 469 if self.scope not in ("function", "class", "module"): 470 raise AttributeError(f"module not available in {self.scope}-scoped context") 471 mod = self._pyfuncitem.getparent(_pytest.python.Module) 472 assert mod is not None 473 return mod.obj
Python module object where the test function was collected.
475 @property 476 def path(self) -> Path: 477 """Path where the test function was collected.""" 478 if self.scope not in ("function", "class", "module", "package"): 479 raise AttributeError(f"path not available in {self.scope}-scoped context") 480 return self._pyfuncitem.path
Path where the test function was collected.
482 @property 483 def keywords(self) -> MutableMapping[str, Any]: 484 """Keywords/markers dictionary for the underlying node.""" 485 node: nodes.Node = self.node 486 return node.keywords
Keywords/markers dictionary for the underlying node.
488 @property 489 def session(self) -> Session: 490 """Pytest session object.""" 491 return self._pyfuncitem.session
Pytest session object.
493 @abc.abstractmethod 494 def addfinalizer(self, finalizer: Callable[[], object]) -> None: 495 """Add finalizer/teardown function to be called without arguments after 496 the last test within the requesting test context finished execution.""" 497 raise NotImplementedError()
Add finalizer/teardown function to be called without arguments after the last test within the requesting test context finished execution.
499 def applymarker(self, marker: str | MarkDecorator) -> None: 500 """Apply a marker to a single test function invocation. 501 502 This method is useful if you don't want to have a keyword/marker 503 on all function invocations. 504 505 :param marker: 506 An object created by a call to ``pytest.mark.NAME(...)``. 507 """ 508 self.node.add_marker(marker)
Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker on all function invocations.
Parameters
- marker: An object created by a call to ``pytest.mark.NAME(...)``.
510 def raiseerror(self, msg: str | None) -> NoReturn: 511 """Raise a FixtureLookupError exception. 512 513 :param msg: 514 An optional custom error message. 515 """ 516 raise FixtureLookupError(None, self, msg)
Raise a FixtureLookupError exception.
Parameters
- msg: An optional custom error message.
518 def getfixturevalue(self, argname: str) -> Any: 519 """Dynamically run a named fixture function. 520 521 Declaring fixtures via function argument is recommended where possible. 522 But if you can only decide whether to use another fixture at test 523 setup time, you may use this function to retrieve it inside a fixture 524 or test function body. 525 526 This method can be used during the test setup phase or the test run 527 phase, but during the test teardown phase a fixture's value may not 528 be available. 529 530 :param argname: 531 The fixture name. 532 :raises pytest.FixtureLookupError: 533 If the given fixture could not be found. 534 """ 535 # Note that in addition to the use case described in the docstring, 536 # getfixturevalue() is also called by pytest itself during item and fixture 537 # setup to evaluate the fixtures that are requested statically 538 # (using function parameters, autouse, etc). 539 540 fixturedef = self._get_active_fixturedef(argname) 541 assert fixturedef.cached_result is not None, ( 542 f'The fixture value for "{argname}" is not available. ' 543 "This can happen when the fixture has already been torn down." 544 ) 545 return fixturedef.cached_result[0]
Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible. But if you can only decide whether to use another fixture at test setup time, you may use this function to retrieve it inside a fixture or test function body.
This method can be used during the test setup phase or the test run phase, but during the test teardown phase a fixture's value may not be available.
Parameters
- argname: The fixture name.
Raises
- pytest.FixtureLookupError: If the given fixture could not be found.
class Function(PyobjMixin, nodes.Item):
    """Item responsible for setting up and executing a Python test function.

    :param name:
        The full function name, including any decorations like those
        added by parametrization (``my_func[my_param]``).
    :param parent:
        The parent Node.
    :param config:
        The pytest Config object.
    :param callspec:
        If given, this function has been parametrized and the callspec contains
        meta information about the parametrization.
    :param callobj:
        If given, the object which will be called when the Function is invoked,
        otherwise the callobj will be obtained from ``parent`` using ``originalname``.
    :param keywords:
        Keywords bound to the function object for "-k" matching.
    :param session:
        The pytest Session object.
    :param fixtureinfo:
        Fixture information already resolved at this fixture node.
    :param originalname:
        The attribute name to use for accessing the underlying function object.
        Defaults to ``name``. Set this if name is different from the original name,
        for example when it contains decorations like those added by parametrization
        (``my_func[my_param]``).
    """

    # Disable since functions handle it themselves.
    _ALLOW_MARKERS = False

    def __init__(
        self,
        name: str,
        parent,
        config: Config | None = None,
        callspec: CallSpec2 | None = None,
        callobj=NOTSET,
        keywords: Mapping[str, Any] | None = None,
        session: Session | None = None,
        fixtureinfo: FuncFixtureInfo | None = None,
        originalname: str | None = None,
    ) -> None:
        super().__init__(name, parent, config=config, session=session)

        if callobj is not NOTSET:
            self._obj = callobj
            self._instance = getattr(callobj, "__self__", None)

        #: Original function name, without any decorations (for example
        #: parametrization adds a ``"[...]"`` suffix to function names), used to access
        #: the underlying function object from ``parent`` (in case ``callobj`` is not given
        #: explicitly).
        #:
        #: .. versionadded:: 3.0
        self.originalname = originalname or name

        # Note: when FunctionDefinition is introduced, we should change ``originalname``
        # to a readonly property that returns FunctionDefinition.name.

        self.own_markers.extend(get_unpacked_marks(self.obj))
        if callspec:
            self.callspec = callspec
            self.own_markers.extend(callspec.marks)

        # todo: this is a hell of a hack
        # https://github.com/pytest-dev/pytest/issues/4569
        # Note: the order of the updates is important here; indicates what
        # takes priority (ctor argument over function attributes over markers).
        # Take own_markers only; NodeKeywords handles parent traversal on its own.
        self.keywords.update((mark.name, mark) for mark in self.own_markers)
        self.keywords.update(self.obj.__dict__)
        if keywords:
            self.keywords.update(keywords)

        if fixtureinfo is None:
            fm = self.session._fixturemanager
            fixtureinfo = fm.getfixtureinfo(self, self.obj, self.cls)
        self._fixtureinfo: FuncFixtureInfo = fixtureinfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()

    # todo: determine sound type limitations
    @classmethod
    def from_parent(cls, parent, **kw) -> Self:
        """The public constructor."""
        # Delegate to the superclass's from_parent factory unchanged.
        return super().from_parent(parent=parent, **kw)

    def _initrequest(self) -> None:
        # Fresh per-item fixture value cache and request object.
        self.funcargs: dict[str, object] = {}
        self._request = fixtures.TopRequest(self, _ispytest=True)

    @property
    def function(self):
        """Underlying python 'function' object."""
        return getimfunc(self.obj)

    @property
    def instance(self):
        """Python instance object the function is bound to.

        Returns None if not a test method, e.g. for a standalone test function,
        a class or a module.
        """
        try:
            return self._instance
        except AttributeError:
            # First access without an explicit callobj: compute and cache it.
            if isinstance(self.parent, Class):
                # Each Function gets a fresh class instance.
                self._instance = self._getinstance()
            else:
                self._instance = None
        return self._instance

    def _getinstance(self):
        if isinstance(self.parent, Class):
            # Each Function gets a fresh class instance.
            return self.parent.newinstance()
        else:
            return None

    def _getobj(self):
        # Resolve the callable from the instance when bound, otherwise
        # from the parent collector's object.
        instance = self.instance
        if instance is not None:
            parent_obj = instance
        else:
            assert self.parent is not None
            parent_obj = self.parent.obj  # type: ignore[attr-defined]
        return getattr(parent_obj, self.originalname)

    @property
    def _pyfuncitem(self):
        """(compatonly) for code expecting pytest-2.2 style request objects."""
        return self

    def runtest(self) -> None:
        """Execute the underlying test function."""
        # Delegate the actual call to the pytest_pyfunc_call hook.
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)

    def setup(self) -> None:
        # Evaluate all statically-known fixtures for this item before the
        # call phase.
        self._request._fillfixtures()

    def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
        """Trim the traceback to the frames relevant to the test function."""
        if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
            code = _pytest._code.Code.from_function(get_real_func(self.obj))
            path, firstlineno = code.path, code.firstlineno
            traceback = excinfo.traceback
            # Progressively weaker cuts, falling back to the full traceback.
            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(path=path)
                if ntraceback == traceback:
                    ntraceback = ntraceback.filter(filter_traceback)
                    if not ntraceback:
                        ntraceback = traceback
            ntraceback = ntraceback.filter(excinfo)

            # issue364: mark all but first and last frames to
            # only show a single-line message for each frame.
            if self.config.getoption("tbstyle", "auto") == "auto":
                if len(ntraceback) > 2:
                    ntraceback = Traceback(
                        (
                            ntraceback[0],
                            *(t.with_repr_style("short") for t in ntraceback[1:-1]),
                            ntraceback[-1],
                        )
                    )

            return ntraceback
        return excinfo.traceback

    # TODO: Type ignored -- breaks Liskov Substitution.
    def repr_failure(  # type: ignore[override]
        self,
        excinfo: ExceptionInfo[BaseException],
    ) -> str | TerminalRepr:
        """Return a representation of a collection or test failure."""
        style = self.config.getoption("tbstyle", "auto")
        if style == "auto":
            style = "long"
        return self._repr_failure_py(excinfo, style=style)
Item responsible for setting up and executing a Python test function.
Parameters
- name: The full function name, including any decorations like those added by parametrization (``my_func[my_param]``).
- parent: The parent Node.
- config: The pytest Config object.
- callspec: If given, this function has been parametrized and the callspec contains meta information about the parametrization.
- callobj: If given, the object which will be called when the Function is invoked, otherwise the callobj will be obtained from ``parent`` using ``originalname``.
- keywords: Keywords bound to the function object for "-k" matching.
- session: The pytest Session object.
- fixtureinfo: Fixture information already resolved at this fixture node.
- originalname: The attribute name to use for accessing the underlying function object. Defaults to ``name``. Set this if name is different from the original name, for example when it contains decorations like those added by parametrization (``my_func[my_param]``).
1620 def __init__( 1621 self, 1622 name: str, 1623 parent, 1624 config: Config | None = None, 1625 callspec: CallSpec2 | None = None, 1626 callobj=NOTSET, 1627 keywords: Mapping[str, Any] | None = None, 1628 session: Session | None = None, 1629 fixtureinfo: FuncFixtureInfo | None = None, 1630 originalname: str | None = None, 1631 ) -> None: 1632 super().__init__(name, parent, config=config, session=session) 1633 1634 if callobj is not NOTSET: 1635 self._obj = callobj 1636 self._instance = getattr(callobj, "__self__", None) 1637 1638 #: Original function name, without any decorations (for example 1639 #: parametrization adds a ``"[...]"`` suffix to function names), used to access 1640 #: the underlying function object from ``parent`` (in case ``callobj`` is not given 1641 #: explicitly). 1642 #: 1643 #: .. versionadded:: 3.0 1644 self.originalname = originalname or name 1645 1646 # Note: when FunctionDefinition is introduced, we should change ``originalname`` 1647 # to a readonly property that returns FunctionDefinition.name. 1648 1649 self.own_markers.extend(get_unpacked_marks(self.obj)) 1650 if callspec: 1651 self.callspec = callspec 1652 self.own_markers.extend(callspec.marks) 1653 1654 # todo: this is a hell of a hack 1655 # https://github.com/pytest-dev/pytest/issues/4569 1656 # Note: the order of the updates is important here; indicates what 1657 # takes priority (ctor argument over function attributes over markers). 1658 # Take own_markers only; NodeKeywords handles parent traversal on its own. 1659 self.keywords.update((mark.name, mark) for mark in self.own_markers) 1660 self.keywords.update(self.obj.__dict__) 1661 if keywords: 1662 self.keywords.update(keywords) 1663 1664 if fixtureinfo is None: 1665 fm = self.session._fixturemanager 1666 fixtureinfo = fm.getfixtureinfo(self, self.obj, self.cls) 1667 self._fixtureinfo: FuncFixtureInfo = fixtureinfo 1668 self.fixturenames = fixtureinfo.names_closure 1669 self._initrequest()
1672 @classmethod 1673 def from_parent(cls, parent, **kw) -> Self: 1674 """The public constructor.""" 1675 return super().from_parent(parent=parent, **kw)
The public constructor.
1681 @property 1682 def function(self): 1683 """Underlying python 'function' object.""" 1684 return getimfunc(self.obj)
Underlying python 'function' object.
1686 @property 1687 def instance(self): 1688 try: 1689 return self._instance 1690 except AttributeError: 1691 if isinstance(self.parent, Class): 1692 # Each Function gets a fresh class instance. 1693 self._instance = self._getinstance() 1694 else: 1695 self._instance = None 1696 return self._instance
Python instance object the function is bound to.
Returns None if not a test method, e.g. for a standalone test function, a class or a module.
1719 def runtest(self) -> None: 1720 """Execute the underlying test function.""" 1721 self.ihook.pytest_pyfunc_call(pyfuncitem=self)
Execute the underlying test function.
1756 def repr_failure( # type: ignore[override] 1757 self, 1758 excinfo: ExceptionInfo[BaseException], 1759 ) -> str | TerminalRepr: 1760 style = self.config.getoption("tbstyle", "auto") 1761 if style == "auto": 1762 style = "long" 1763 return self._repr_failure_py(excinfo, style=style)
Return a representation of a collection or test failure.
See also: :ref:`non-python tests`.
Parameters
- excinfo: Exception information for the failure.
@final
class HookRecorder:
    """Record all hooks called in a plugin manager.

    Hook recorders are created by :class:`Pytester`.

    This wraps all the hook calls in the plugin manager, recording each call
    before propagating the normal calls.
    """

    def __init__(
        self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False
    ) -> None:
        check_ispytest(_ispytest)

        self._pluginmanager = pluginmanager
        # All recorded hook calls, in call order.
        self.calls: list[RecordedHookCall] = []
        # Run return code; only initialized here, assigned elsewhere.
        self.ret: int | ExitCode | None = None

        def before(hook_name: str, hook_impls, kwargs) -> None:
            # Called before every hook: record its name and keyword arguments.
            self.calls.append(RecordedHookCall(hook_name, kwargs))

        def after(outcome, hook_name: str, hook_impls, kwargs) -> None:
            pass

        # add_hookcall_monitoring returns an undo callable; see finish_recording().
        self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)

    def finish_recording(self) -> None:
        """Stop recording by removing the hook-call monitoring."""
        self._undo_wrapping()

    def getcalls(self, names: str | Iterable[str]) -> list[RecordedHookCall]:
        """Get all recorded calls to hooks with the given names (or name)."""
        if isinstance(names, str):
            names = names.split()
        return [call for call in self.calls if call._name in names]

    def assert_contains(self, entries: Sequence[tuple[str, str]]) -> None:
        """Assert the recorded calls contain, in order, a call matching each
        ``(hook_name, check_expression)`` entry.

        Each check expression is evaluated with the caller's locals and the
        matching call's attributes in scope.
        """
        __tracebackhide__ = True
        i = 0
        entries = list(entries)
        # Since Python 3.13, f_locals is not a dict, but eval requires a dict.
        backlocals = dict(sys._getframe(1).f_locals)
        while entries:
            name, check = entries.pop(0)
            for ind, call in enumerate(self.calls[i:]):
                if call._name == name:
                    print("NAMEMATCH", name, call)
                    if eval(check, backlocals, call.__dict__):
                        print("CHECKERMATCH", repr(check), "->", call)
                    else:
                        # Name matched but the check failed: keep scanning.
                        print("NOCHECKERMATCH", repr(check), "-", call)
                        continue
                    # Resume the scan for the next entry after this call.
                    i += ind + 1
                    break
                print("NONAMEMATCH", name, "with", call)
            else:
                fail(f"could not find {name!r} check {check!r}")

    def popcall(self, name: str) -> RecordedHookCall:
        """Remove and return the first recorded call to hook *name*; fail if absent."""
        __tracebackhide__ = True
        for i, call in enumerate(self.calls):
            if call._name == name:
                del self.calls[i]
                return call
        lines = [f"could not find call {name!r}, in:"]
        lines.extend([f"  {x}" for x in self.calls])
        fail("\n".join(lines))

    def getcall(self, name: str) -> RecordedHookCall:
        """Return the single recorded call to hook *name* (asserts exactly one)."""
        values = self.getcalls(name)
        assert len(values) == 1, (name, values)
        return values[0]

    # functionality for test reports

    @overload
    def getreports(
        self,
        names: Literal["pytest_collectreport"],
    ) -> Sequence[CollectReport]: ...

    @overload
    def getreports(
        self,
        names: Literal["pytest_runtest_logreport"],
    ) -> Sequence[TestReport]: ...

    @overload
    def getreports(
        self,
        names: str | Iterable[str] = (
            "pytest_collectreport",
            "pytest_runtest_logreport",
        ),
    ) -> Sequence[CollectReport | TestReport]: ...

    def getreports(
        self,
        names: str | Iterable[str] = (
            "pytest_collectreport",
            "pytest_runtest_logreport",
        ),
    ) -> Sequence[CollectReport | TestReport]:
        """Return the report objects carried by all calls to the given hooks."""
        return [x.report for x in self.getcalls(names)]

    def matchreport(
        self,
        inamepart: str = "",
        names: str | Iterable[str] = (
            "pytest_runtest_logreport",
            "pytest_collectreport",
        ),
        when: str | None = None,
    ) -> CollectReport | TestReport:
        """Return a testreport whose dotted import path matches."""
        values = []
        for rep in self.getreports(names=names):
            if not when and rep.when != "call" and rep.passed:
                # setup/teardown passing reports - let's ignore those
                continue
            if when and rep.when != when:
                continue
            if not inamepart or inamepart in rep.nodeid.split("::"):
                values.append(rep)
        if not values:
            raise ValueError(
                f"could not find test report matching {inamepart!r}: "
                "no test reports at all!"
            )
        if len(values) > 1:
            raise ValueError(
                f"found 2 or more testreports matching {inamepart!r}: {values}"
            )
        return values[0]

    @overload
    def getfailures(
        self,
        names: Literal["pytest_collectreport"],
    ) -> Sequence[CollectReport]: ...

    @overload
    def getfailures(
        self,
        names: Literal["pytest_runtest_logreport"],
    ) -> Sequence[TestReport]: ...

    @overload
    def getfailures(
        self,
        names: str | Iterable[str] = (
            "pytest_collectreport",
            "pytest_runtest_logreport",
        ),
    ) -> Sequence[CollectReport | TestReport]: ...

    def getfailures(
        self,
        names: str | Iterable[str] = (
            "pytest_collectreport",
            "pytest_runtest_logreport",
        ),
    ) -> Sequence[CollectReport | TestReport]:
        """Return only the failed reports among the given hooks' reports."""
        return [rep for rep in self.getreports(names) if rep.failed]

    def getfailedcollections(self) -> Sequence[CollectReport]:
        """Return all failed collection reports."""
        return self.getfailures("pytest_collectreport")

    def listoutcomes(
        self,
    ) -> tuple[
        Sequence[TestReport],
        Sequence[CollectReport | TestReport],
        Sequence[CollectReport | TestReport],
    ]:
        """Partition all recorded reports into ``(passed, skipped, failed)``."""
        passed = []
        skipped = []
        failed = []
        for rep in self.getreports(
            ("pytest_collectreport", "pytest_runtest_logreport")
        ):
            if rep.passed:
                # Only the "call" phase counts as a pass; passing
                # setup/teardown reports are ignored.
                if rep.when == "call":
                    assert isinstance(rep, TestReport)
                    passed.append(rep)
            elif rep.skipped:
                skipped.append(rep)
            else:
                assert rep.failed, f"Unexpected outcome: {rep!r}"
                failed.append(rep)
        return passed, skipped, failed

    def countoutcomes(self) -> list[int]:
        """Return ``[len(passed), len(skipped), len(failed)]``."""
        return [len(x) for x in self.listoutcomes()]

    def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None:
        """Assert the recorded outcomes have exactly the given counts."""
        __tracebackhide__ = True
        from _pytest.pytester_assertions import assertoutcome

        outcomes = self.listoutcomes()
        assertoutcome(
            outcomes,
            passed=passed,
            skipped=skipped,
            failed=failed,
        )

    def clear(self) -> None:
        """Forget all recorded calls."""
        self.calls[:] = []
Record all hooks called in a plugin manager.
Hook recorders are created by Pytester.
This wraps all the hook calls in the plugin manager, recording each call before propagating the normal calls.
    def __init__(
        self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False
    ) -> None:
        """Attach hook-call monitoring to *pluginmanager* and start recording."""
        check_ispytest(_ispytest)

        self._pluginmanager = pluginmanager
        # All recorded hook calls, in call order.
        self.calls: list[RecordedHookCall] = []
        # Run return code; only initialized here, assigned elsewhere.
        self.ret: int | ExitCode | None = None

        def before(hook_name: str, hook_impls, kwargs) -> None:
            # Called before every hook: record its name and keyword arguments.
            self.calls.append(RecordedHookCall(hook_name, kwargs))

        def after(outcome, hook_name: str, hook_impls, kwargs) -> None:
            pass

        # add_hookcall_monitoring returns an undo callable; used to stop recording.
        self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
279 def getcalls(self, names: str | Iterable[str]) -> list[RecordedHookCall]: 280 """Get all recorded calls to hooks with the given names (or name).""" 281 if isinstance(names, str): 282 names = names.split() 283 return [call for call in self.calls if call._name in names]
Get all recorded calls to hooks with the given names (or name).
    def assert_contains(self, entries: Sequence[tuple[str, str]]) -> None:
        """Assert the recorded calls contain, in order, a call matching each
        ``(hook_name, check_expression)`` entry.

        Each check expression is evaluated with the caller's locals and the
        matching call's attributes in scope.
        """
        __tracebackhide__ = True
        i = 0
        entries = list(entries)
        # Since Python 3.13, f_locals is not a dict, but eval requires a dict.
        backlocals = dict(sys._getframe(1).f_locals)
        while entries:
            name, check = entries.pop(0)
            for ind, call in enumerate(self.calls[i:]):
                if call._name == name:
                    print("NAMEMATCH", name, call)
                    if eval(check, backlocals, call.__dict__):
                        print("CHECKERMATCH", repr(check), "->", call)
                    else:
                        # Name matched but the check failed: keep scanning.
                        print("NOCHECKERMATCH", repr(check), "-", call)
                        continue
                    # Resume the scan for the next entry after this call.
                    i += ind + 1
                    break
                print("NONAMEMATCH", name, "with", call)
            else:
                fail(f"could not find {name!r} check {check!r}")
307 def popcall(self, name: str) -> RecordedHookCall: 308 __tracebackhide__ = True 309 for i, call in enumerate(self.calls): 310 if call._name == name: 311 del self.calls[i] 312 return call 313 lines = [f"could not find call {name!r}, in:"] 314 lines.extend([f" {x}" for x in self.calls]) 315 fail("\n".join(lines))
354 def matchreport( 355 self, 356 inamepart: str = "", 357 names: str | Iterable[str] = ( 358 "pytest_runtest_logreport", 359 "pytest_collectreport", 360 ), 361 when: str | None = None, 362 ) -> CollectReport | TestReport: 363 """Return a testreport whose dotted import path matches.""" 364 values = [] 365 for rep in self.getreports(names=names): 366 if not when and rep.when != "call" and rep.passed: 367 # setup/teardown passing reports - let's ignore those 368 continue 369 if when and rep.when != when: 370 continue 371 if not inamepart or inamepart in rep.nodeid.split("::"): 372 values.append(rep) 373 if not values: 374 raise ValueError( 375 f"could not find test report matching {inamepart!r}: " 376 "no test reports at all!" 377 ) 378 if len(values) > 1: 379 raise ValueError( 380 f"found 2 or more testreports matching {inamepart!r}: {values}" 381 ) 382 return values[0]
Return a testreport whose dotted import path matches.
417 def listoutcomes( 418 self, 419 ) -> tuple[ 420 Sequence[TestReport], 421 Sequence[CollectReport | TestReport], 422 Sequence[CollectReport | TestReport], 423 ]: 424 passed = [] 425 skipped = [] 426 failed = [] 427 for rep in self.getreports( 428 ("pytest_collectreport", "pytest_runtest_logreport") 429 ): 430 if rep.passed: 431 if rep.when == "call": 432 assert isinstance(rep, TestReport) 433 passed.append(rep) 434 elif rep.skipped: 435 skipped.append(rep) 436 else: 437 assert rep.failed, f"Unexpected outcome: {rep!r}" 438 failed.append(rep) 439 return passed, skipped, failed
444 def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None: 445 __tracebackhide__ = True 446 from _pytest.pytester_assertions import assertoutcome 447 448 outcomes = self.listoutcomes() 449 assertoutcome( 450 outcomes, 451 passed=passed, 452 skipped=skipped, 453 failed=failed, 454 )
class Item(Node, abc.ABC):
    """Base class of all test invocation items.

    Note that for a single function there might be multiple test invocation items.
    """

    nextitem = None

    def __init__(
        self,
        name,
        parent=None,
        config: Config | None = None,
        session: Session | None = None,
        nodeid: str | None = None,
        **kw,
    ) -> None:
        # The first two arguments are intentionally passed positionally,
        # to keep plugins who define a node type which inherits from
        # (pytest.Item, pytest.File) working (see issue #8435).
        # They can be made kwargs when the deprecation above is done.
        super().__init__(
            name,
            parent,
            config=config,
            session=session,
            nodeid=nodeid,
            **kw,
        )
        # Extra (when, key, content) sections attached via add_report_section().
        self._report_sections: list[tuple[str, str, str]] = []

        #: A list of tuples (name, value) that holds user defined properties
        #: for this test.
        self.user_properties: list[tuple[str, object]] = []

        self._check_item_and_collector_diamond_inheritance()

    def _check_item_and_collector_diamond_inheritance(self) -> None:
        """
        Check if the current type inherits from both File and Collector
        at the same time, emitting a warning accordingly (#8447).
        """
        cls = type(self)

        # We inject an attribute in the type to avoid issuing this warning
        # for the same class more than once, which is not helpful.
        # It is a hack, but was deemed acceptable in order to avoid
        # flooding the user in the common case.
        attr_name = "_pytest_diamond_inheritance_warning_shown"
        if getattr(cls, attr_name, False):
            return
        setattr(cls, attr_name, True)

        problems = ", ".join(
            base.__name__ for base in cls.__bases__ if issubclass(base, Collector)
        )
        if problems:
            warnings.warn(
                f"{cls.__name__} is an Item subclass and should not be a collector, "
                f"however its bases {problems} are collectors.\n"
                "Please split the Collectors and the Item into separate node types.\n"
                "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n"
                "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/",
                PytestWarning,
            )

    @abc.abstractmethod
    def runtest(self) -> None:
        """Run the test case for this item.

        Must be implemented by subclasses.

        .. seealso:: :ref:`non-python tests`
        """
        # Explicit raise guards against a subclass calling super().runtest().
        raise NotImplementedError("runtest must be implemented by Item subclass")

    def add_report_section(self, when: str, key: str, content: str) -> None:
        """Add a new report section, similar to what's done internally to add
        stdout and stderr captured output::

            item.add_report_section("call", "stdout", "report section contents")

        :param str when:
            One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
        :param str key:
            Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
            ``"stderr"`` internally.
        :param str content:
            The full contents as a string.
        """
        # Empty content is silently dropped; only non-empty sections are kept.
        if content:
            self._report_sections.append((when, key, content))

    def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]:
        """Get location information for this item for test reports.

        Returns a tuple with three elements:

        - The path of the test (default ``self.path``)
        - The 0-based line number of the test (default ``None``)
        - A name of the test to be shown (default ``""``)

        .. seealso:: :ref:`non-python tests`
        """
        return self.path, None, ""

    @cached_property
    def location(self) -> tuple[str, int | None, str]:
        """
        Returns a tuple of ``(relfspath, lineno, testname)`` for this item
        where ``relfspath`` is file path relative to ``config.rootpath``
        and lineno is a 0-based line number.
        """
        location = self.reportinfo()
        path = absolutepath(location[0])
        relfspath = self.session._node_location_to_relpath(path)
        # The shown test name must already be a plain string.
        assert type(location[2]) is str
        return (relfspath, location[1], location[2])
Base class of all test invocation items.
Note that for a single function there might be multiple test invocation items.
    @abc.abstractmethod
    def runtest(self) -> None:
        """Run the test case for this item.

        Must be implemented by subclasses.

        .. seealso:: :ref:`non-python tests`
        """
        # Explicit raise guards against a subclass calling super().runtest().
        raise NotImplementedError("runtest must be implemented by Item subclass")
Run the test case for this item.
Must be implemented by subclasses.
See also: :ref:`non-python tests`.
732 def add_report_section(self, when: str, key: str, content: str) -> None: 733 """Add a new report section, similar to what's done internally to add 734 stdout and stderr captured output:: 735 736 item.add_report_section("call", "stdout", "report section contents") 737 738 :param str when: 739 One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. 740 :param str key: 741 Name of the section, can be customized at will. Pytest uses ``"stdout"`` and 742 ``"stderr"`` internally. 743 :param str content: 744 The full contents as a string. 745 """ 746 if content: 747 self._report_sections.append((when, key, content))
Add a new report section, similar to what's done internally to add stdout and stderr captured output::
item.add_report_section("call", "stdout", "report section contents")
Parameters
- when: One of the possible capture states: ``"setup"``, ``"call"``, ``"teardown"``.
- key: Name of the section; can be customized at will. Pytest uses ``"stdout"`` and ``"stderr"`` internally.
- content: The full contents of the section, as a string.
749 def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: 750 """Get location information for this item for test reports. 751 752 Returns a tuple with three elements: 753 754 - The path of the test (default ``self.path``) 755 - The 0-based line number of the test (default ``None``) 756 - A name of the test to be shown (default ``""``) 757 758 .. seealso:: :ref:`non-python tests` 759 """ 760 return self.path, None, ""
Get location information for this item for test reports.
Returns a tuple with three elements:
- The path of the test (default ``self.path``)
- The 0-based line number of the test (default ``None``)
- A name of the test to be shown (default ``""``)

See also: :ref:`non-python tests`.
    @cached_property
    def location(self) -> tuple[str, int | None, str]:
        """
        Returns a tuple of ``(relfspath, lineno, testname)`` for this item
        where ``relfspath`` is file path relative to ``config.rootpath``
        and lineno is a 0-based line number.
        """
        location = self.reportinfo()
        # reportinfo may return any path-like; normalize to an absolute path
        # before asking the session for the rootpath-relative form.
        path = absolutepath(location[0])
        relfspath = self.session._node_location_to_relpath(path)
        # The shown test name must already be a plain string.
        assert type(location[2]) is str
        return (relfspath, location[1], location[2])
Returns a tuple of (relfspath, lineno, testname) for this item
where relfspath is file path relative to config.rootpath
and lineno is a 0-based line number.
class LineMatcher:
    """Flexible matching of text.

    This is a convenience class to test large texts like the output of
    commands.

    The constructor takes a list of lines without their trailing newlines, i.e.
    ``text.splitlines()``.
    """

    def __init__(self, lines: list[str]) -> None:
        self.lines = lines
        # Log of match/no-match messages, reported on failure.
        self._log_output: list[str] = []

    def __str__(self) -> str:
        """Return the entire original text.

        .. versionadded:: 6.2
            You can use :meth:`str` in older versions.
        """
        return "\n".join(self.lines)

    def _getlines(self, lines2: str | Sequence[str] | Source) -> Sequence[str]:
        """Normalize *lines2* (string, Source or sequence) to a sequence of lines."""
        if isinstance(lines2, str):
            lines2 = Source(lines2)
        if isinstance(lines2, Source):
            lines2 = lines2.strip().lines
        return lines2

    def fnmatch_lines_random(self, lines2: Sequence[str]) -> None:
        """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`)."""
        __tracebackhide__ = True
        self._match_lines_random(lines2, fnmatch)

    def re_match_lines_random(self, lines2: Sequence[str]) -> None:
        """Check lines exist in the output in any order (using :func:`python:re.match`)."""
        __tracebackhide__ = True
        self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name)))

    def _match_lines_random(
        self, lines2: Sequence[str], match_func: Callable[[str, str], bool]
    ) -> None:
        """Check each pattern matches some output line, in any order; fail otherwise."""
        __tracebackhide__ = True
        lines2 = self._getlines(lines2)
        for line in lines2:
            for x in self.lines:
                # An exact match also counts, regardless of match_func.
                if line == x or match_func(x, line):
                    self._log("matched: ", repr(line))
                    break
            else:
                msg = f"line {line!r} not found in output"
                self._log(msg)
                self._fail(msg)

    def get_lines_after(self, fnline: str) -> Sequence[str]:
        """Return all lines following the given line in the text.

        The given line can contain glob wildcards.
        """
        for i, line in enumerate(self.lines):
            if fnline == line or fnmatch(line, fnline):
                return self.lines[i + 1 :]
        raise ValueError(f"line {fnline!r} not found in output")

    def _log(self, *args) -> None:
        """Append a space-joined message to the internal log."""
        self._log_output.append(" ".join(str(x) for x in args))

    @property
    def _log_text(self) -> str:
        """The accumulated log as one newline-joined string."""
        return "\n".join(self._log_output)

    def fnmatch_lines(
        self, lines2: Sequence[str], *, consecutive: bool = False
    ) -> None:
        """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`).

        The argument is a list of lines which have to match and can use glob
        wildcards.  If they do not match a pytest.fail() is called.  The
        matches and non-matches are also shown as part of the error message.

        :param lines2: String patterns to match.
        :param consecutive: Match lines consecutively?
        """
        __tracebackhide__ = True
        self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive)

    def re_match_lines(
        self, lines2: Sequence[str], *, consecutive: bool = False
    ) -> None:
        """Check lines exist in the output (using :func:`python:re.match`).

        The argument is a list of lines which have to match using ``re.match``.
        If they do not match a pytest.fail() is called.

        The matches and non-matches are also shown as part of the error message.

        :param lines2: string patterns to match.
        :param consecutive: match lines consecutively?
        """
        __tracebackhide__ = True
        self._match_lines(
            lines2,
            lambda name, pat: bool(re.match(pat, name)),
            "re.match",
            consecutive=consecutive,
        )

    def _match_lines(
        self,
        lines2: Sequence[str],
        match_func: Callable[[str, str], bool],
        match_nickname: str,
        *,
        consecutive: bool = False,
    ) -> None:
        """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.

        :param Sequence[str] lines2:
            List of string patterns to match. The actual format depends on
            ``match_func``.
        :param match_func:
            A callable ``match_func(line, pattern)`` where line is the
            captured line from stdout/stderr and pattern is the matching
            pattern.
        :param str match_nickname:
            The nickname for the match function that will be logged to stdout
            when a match occurs.
        :param consecutive:
            Match lines consecutively?
        """
        if not isinstance(lines2, collections.abc.Sequence):
            raise TypeError(f"invalid type for lines2: {type(lines2).__name__}")
        lines2 = self._getlines(lines2)
        lines1 = self.lines[:]
        extralines = []
        __tracebackhide__ = True
        # Width used to right-align the "with:"/"and:"/"nomatch:" labels.
        wnick = len(match_nickname) + 1
        started = False
        for line in lines2:
            nomatchprinted = False
            while lines1:
                nextline = lines1.pop(0)
                if line == nextline:
                    self._log("exact match:", repr(line))
                    started = True
                    break
                elif match_func(nextline, line):
                    self._log(f"{match_nickname}:", repr(line))
                    self._log(
                        "{:>{width}}".format("with:", width=wnick), repr(nextline)
                    )
                    started = True
                    break
                else:
                    if consecutive and started:
                        msg = f"no consecutive match: {line!r}"
                        self._log(msg)
                        self._log(
                            "{:>{width}}".format("with:", width=wnick), repr(nextline)
                        )
                        self._fail(msg)
                    if not nomatchprinted:
                        self._log(
                            "{:>{width}}".format("nomatch:", width=wnick), repr(line)
                        )
                        nomatchprinted = True
                    self._log("{:>{width}}".format("and:", width=wnick), repr(nextline))
                    extralines.append(nextline)
            else:
                # Output exhausted without finding a match for this pattern.
                msg = f"remains unmatched: {line!r}"
                self._log(msg)
                self._fail(msg)
        self._log_output = []

    def no_fnmatch_line(self, pat: str) -> None:
        """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``.

        :param str pat: The pattern to match lines.
        """
        __tracebackhide__ = True
        self._no_match_line(pat, fnmatch, "fnmatch")

    def no_re_match_line(self, pat: str) -> None:
        """Ensure captured lines do not match the given pattern, using ``re.match``.

        :param str pat: The regular expression to match lines.
        """
        __tracebackhide__ = True
        self._no_match_line(
            pat, lambda name, pat: bool(re.match(pat, name)), "re.match"
        )

    def _no_match_line(
        self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str
    ) -> None:
        """Ensure no captured line matches the given pattern (via ``match_func``).

        :param str pat: The pattern to match lines.
        """
        __tracebackhide__ = True
        nomatch_printed = False
        wnick = len(match_nickname) + 1
        for line in self.lines:
            if match_func(line, pat):
                msg = f"{match_nickname}: {pat!r}"
                self._log(msg)
                self._log("{:>{width}}".format("with:", width=wnick), repr(line))
                self._fail(msg)
            else:
                if not nomatch_printed:
                    self._log("{:>{width}}".format("nomatch:", width=wnick), repr(pat))
                    nomatch_printed = True
                self._log("{:>{width}}".format("and:", width=wnick), repr(line))
        self._log_output = []

    def _fail(self, msg: str) -> None:
        # Note: the full accumulated log is reported, not just *msg*.
        __tracebackhide__ = True
        log_text = self._log_text
        self._log_output = []
        fail(log_text)

    def str(self) -> str:
        """Return the entire original text."""
        # The builtin str() here resolves to self.__str__().
        return str(self)
Flexible matching of text.
This is a convenience class to test large texts like the output of commands.
The constructor takes a list of lines without their trailing newlines, i.e.
text.splitlines().
1598 def fnmatch_lines_random(self, lines2: Sequence[str]) -> None: 1599 """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`).""" 1600 __tracebackhide__ = True 1601 self._match_lines_random(lines2, fnmatch)
Check lines exist in the output in any order (using python:fnmatch.fnmatch()).
1603 def re_match_lines_random(self, lines2: Sequence[str]) -> None: 1604 """Check lines exist in the output in any order (using :func:`python:re.match`).""" 1605 __tracebackhide__ = True 1606 self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name)))
Check lines exist in the output in any order (using python:re.match()).
1623 def get_lines_after(self, fnline: str) -> Sequence[str]: 1624 """Return all lines following the given line in the text. 1625 1626 The given line can contain glob wildcards. 1627 """ 1628 for i, line in enumerate(self.lines): 1629 if fnline == line or fnmatch(line, fnline): 1630 return self.lines[i + 1 :] 1631 raise ValueError(f"line {fnline!r} not found in output")
Return all lines following the given line in the text.
The given line can contain glob wildcards.
1640 def fnmatch_lines( 1641 self, lines2: Sequence[str], *, consecutive: bool = False 1642 ) -> None: 1643 """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`). 1644 1645 The argument is a list of lines which have to match and can use glob 1646 wildcards. If they do not match a pytest.fail() is called. The 1647 matches and non-matches are also shown as part of the error message. 1648 1649 :param lines2: String patterns to match. 1650 :param consecutive: Match lines consecutively? 1651 """ 1652 __tracebackhide__ = True 1653 self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive)
Check lines exist in the output (using python:fnmatch.fnmatch()).
The argument is a list of lines which have to match and can use glob wildcards. If they do not match a pytest.fail() is called. The matches and non-matches are also shown as part of the error message.
Parameters
- lines2: String patterns to match.
- consecutive: Match lines consecutively?
1655 def re_match_lines( 1656 self, lines2: Sequence[str], *, consecutive: bool = False 1657 ) -> None: 1658 """Check lines exist in the output (using :func:`python:re.match`). 1659 1660 The argument is a list of lines which have to match using ``re.match``. 1661 If they do not match a pytest.fail() is called. 1662 1663 The matches and non-matches are also shown as part of the error message. 1664 1665 :param lines2: string patterns to match. 1666 :param consecutive: match lines consecutively? 1667 """ 1668 __tracebackhide__ = True 1669 self._match_lines( 1670 lines2, 1671 lambda name, pat: bool(re.match(pat, name)), 1672 "re.match", 1673 consecutive=consecutive, 1674 )
Check lines exist in the output (using python:re.match()).
The argument is a list of lines which have to match using re.match.
If they do not match a pytest.fail() is called.
The matches and non-matches are also shown as part of the error message.
Parameters
- lines2: string patterns to match.
- consecutive: match lines consecutively?
1743 def no_fnmatch_line(self, pat: str) -> None: 1744 """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``. 1745 1746 :param str pat: The pattern to match lines. 1747 """ 1748 __tracebackhide__ = True 1749 self._no_match_line(pat, fnmatch, "fnmatch")
Ensure captured lines do not match the given pattern, using fnmatch.fnmatch.
Parameters
- str pat: The pattern to match lines.
1751 def no_re_match_line(self, pat: str) -> None: 1752 """Ensure captured lines do not match the given pattern, using ``re.match``. 1753 1754 :param str pat: The regular expression to match lines. 1755 """ 1756 __tracebackhide__ = True 1757 self._no_match_line( 1758 pat, lambda name, pat: bool(re.match(pat, name)), "re.match" 1759 )
Ensure captured lines do not match the given pattern, using re.match.
Parameters
- str pat: The regular expression to match lines.
@final
class LogCaptureFixture:
    """Provides access and control of log capturing."""

    def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None:
        check_ispytest(_ispytest)
        self._item = item
        # Handler level to restore on teardown (None = never changed).
        self._initial_handler_level: int | None = None
        # Dict of log name -> log level.
        self._initial_logger_levels: dict[str | None, int] = {}
        # logging.disable() level to restore on teardown (None = never changed).
        self._initial_disabled_logging_level: int | None = None

    def _finalize(self) -> None:
        """Finalize the fixture.

        This restores the log levels and the disabled logging levels changed by :meth:`set_level`.
        """
        # Restore log levels.
        if self._initial_handler_level is not None:
            self.handler.setLevel(self._initial_handler_level)
        for logger_name, level in self._initial_logger_levels.items():
            logger = logging.getLogger(logger_name)
            logger.setLevel(level)
        # Disable logging at the original disabled logging level.
        if self._initial_disabled_logging_level is not None:
            logging.disable(self._initial_disabled_logging_level)
            self._initial_disabled_logging_level = None

    @property
    def handler(self) -> LogCaptureHandler:
        """Get the logging handler used by the fixture."""
        return self._item.stash[caplog_handler_key]

    def get_records(
        self, when: Literal["setup", "call", "teardown"]
    ) -> list[logging.LogRecord]:
        """Get the logging records for one of the possible test phases.

        :param when:
            Which test phase to obtain the records from.
            Valid values are: "setup", "call" and "teardown".

        :returns: The list of captured records at the given stage.

        .. versionadded:: 3.4
        """
        return self._item.stash[caplog_records_key].get(when, [])

    @property
    def text(self) -> str:
        """The formatted log text."""
        return _remove_ansi_escape_sequences(self.handler.stream.getvalue())

    @property
    def records(self) -> list[logging.LogRecord]:
        """The list of log records."""
        return self.handler.records

    @property
    def record_tuples(self) -> list[tuple[str, int, str]]:
        """A list of a stripped down version of log records intended
        for use in assertion comparison.

        The format of the tuple is:

            (logger_name, log_level, message)
        """
        return [(r.name, r.levelno, r.getMessage()) for r in self.records]

    @property
    def messages(self) -> list[str]:
        """A list of format-interpolated log messages.

        Unlike 'records', which contains the format string and parameters for
        interpolation, log messages in this list are all interpolated.

        Unlike 'text', which contains the output from the handler, log
        messages in this list are unadorned with levels, timestamps, etc,
        making exact comparisons more reliable.

        Note that traceback or stack info (from :func:`logging.exception` or
        the `exc_info` or `stack_info` arguments to the logging functions) is
        not included, as this is added by the formatter in the handler.

        .. versionadded:: 3.7
        """
        return [r.getMessage() for r in self.records]

    def clear(self) -> None:
        """Reset the list of log records and the captured log text."""
        self.handler.clear()

    def _force_enable_logging(
        self, level: int | str, logger_obj: logging.Logger
    ) -> int:
        """Enable the desired logging level if the global level was disabled via ``logging.disabled``.

        Only enables logging levels greater than or equal to the requested ``level``.

        Does nothing if the desired ``level`` wasn't disabled.

        :param level:
            The logger level caplog should capture.
            All logging is enabled if a non-standard logging level string is supplied.
            Valid level strings are in :data:`logging._nameToLevel`.
        :param logger_obj: The logger object to check.

        :return: The original disabled logging level.
        """
        original_disable_level: int = logger_obj.manager.disable

        if isinstance(level, str):
            # Try to translate the level string to an int for `logging.disable()`
            level = logging.getLevelName(level)

        if not isinstance(level, int):
            # The level provided was not valid, so just un-disable all logging.
            logging.disable(logging.NOTSET)
        elif not logger_obj.isEnabledFor(level):
            # Each level is `10` away from other levels.
            # https://docs.python.org/3/library/logging.html#logging-levels
            disable_level = max(level - 10, logging.NOTSET)
            logging.disable(disable_level)

        return original_disable_level

    def set_level(self, level: int | str, logger: str | None = None) -> None:
        """Set the threshold level of a logger for the duration of a test.

        Logging messages which are less severe than this level will not be captured.

        .. versionchanged:: 3.4
            The levels of the loggers changed by this function will be
            restored to their initial values at the end of the test.

        Will enable the requested logging level if it was disabled via :func:`logging.disable`.

        :param level: The level.
        :param logger: The logger to update. If not given, the root logger.
        """
        logger_obj = logging.getLogger(logger)
        # Save the original log-level to restore it during teardown.
        self._initial_logger_levels.setdefault(logger, logger_obj.level)
        logger_obj.setLevel(level)
        if self._initial_handler_level is None:
            self._initial_handler_level = self.handler.level
        self.handler.setLevel(level)
        initial_disabled_logging_level = self._force_enable_logging(level, logger_obj)
        if self._initial_disabled_logging_level is None:
            self._initial_disabled_logging_level = initial_disabled_logging_level

    @contextmanager
    def at_level(self, level: int | str, logger: str | None = None) -> Generator[None]:
        """Context manager that sets the level for capturing of logs. After
        the end of the 'with' statement the level is restored to its original
        value.

        Will enable the requested logging level if it was disabled via :func:`logging.disable`.

        :param level: The level.
        :param logger: The logger to update. If not given, the root logger.
        """
        logger_obj = logging.getLogger(logger)
        orig_level = logger_obj.level
        logger_obj.setLevel(level)
        handler_orig_level = self.handler.level
        self.handler.setLevel(level)
        original_disable_level = self._force_enable_logging(level, logger_obj)
        try:
            yield
        finally:
            # Restore everything even if the body raised.
            logger_obj.setLevel(orig_level)
            self.handler.setLevel(handler_orig_level)
            logging.disable(original_disable_level)

    @contextmanager
    def filtering(self, filter_: logging.Filter) -> Generator[None]:
        """Context manager that temporarily adds the given filter to the caplog's
        :meth:`handler` for the 'with' statement block, and removes that filter at the
        end of the block.

        :param filter_: A custom :class:`logging.Filter` object.

        .. versionadded:: 7.5
        """
        self.handler.addFilter(filter_)
        try:
            yield
        finally:
            self.handler.removeFilter(filter_)
Provides access and control of log capturing.
408 def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None: 409 check_ispytest(_ispytest) 410 self._item = item 411 self._initial_handler_level: int | None = None 412 # Dict of log name -> log level. 413 self._initial_logger_levels: dict[str | None, int] = {} 414 self._initial_disabled_logging_level: int | None = None
432 @property 433 def handler(self) -> LogCaptureHandler: 434 """Get the logging handler used by the fixture.""" 435 return self._item.stash[caplog_handler_key]
Get the logging handler used by the fixture.
437 def get_records( 438 self, when: Literal["setup", "call", "teardown"] 439 ) -> list[logging.LogRecord]: 440 """Get the logging records for one of the possible test phases. 441 442 :param when: 443 Which test phase to obtain the records from. 444 Valid values are: "setup", "call" and "teardown". 445 446 :returns: The list of captured records at the given stage. 447 448 .. versionadded:: 3.4 449 """ 450 return self._item.stash[caplog_records_key].get(when, [])
Get the logging records for one of the possible test phases.
Parameters
- when: Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".
:returns: The list of captured records at the given stage.
New in version 3.4.
452 @property 453 def text(self) -> str: 454 """The formatted log text.""" 455 return _remove_ansi_escape_sequences(self.handler.stream.getvalue())
The formatted log text.
457 @property 458 def records(self) -> list[logging.LogRecord]: 459 """The list of log records.""" 460 return self.handler.records
The list of log records.
462 @property 463 def record_tuples(self) -> list[tuple[str, int, str]]: 464 """A list of a stripped down version of log records intended 465 for use in assertion comparison. 466 467 The format of the tuple is: 468 469 (logger_name, log_level, message) 470 """ 471 return [(r.name, r.levelno, r.getMessage()) for r in self.records]
A list of a stripped down version of log records intended for use in assertion comparison.
The format of the tuple is:
(logger_name, log_level, message)
473 @property 474 def messages(self) -> list[str]: 475 """A list of format-interpolated log messages. 476 477 Unlike 'records', which contains the format string and parameters for 478 interpolation, log messages in this list are all interpolated. 479 480 Unlike 'text', which contains the output from the handler, log 481 messages in this list are unadorned with levels, timestamps, etc, 482 making exact comparisons more reliable. 483 484 Note that traceback or stack info (from :func:`logging.exception` or 485 the `exc_info` or `stack_info` arguments to the logging functions) is 486 not included, as this is added by the formatter in the handler. 487 488 .. versionadded:: 3.7 489 """ 490 return [r.getMessage() for r in self.records]
A list of format-interpolated log messages.
Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list are all interpolated.
Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with levels, timestamps, etc, making exact comparisons more reliable.
Note that traceback or stack info (from logging.exception() or
the exc_info or stack_info arguments to the logging functions) is
not included, as this is added by the formatter in the handler.
New in version 3.7.
492 def clear(self) -> None: 493 """Reset the list of log records and the captured log text.""" 494 self.handler.clear()
Reset the list of log records and the captured log text.
530 def set_level(self, level: int | str, logger: str | None = None) -> None: 531 """Set the threshold level of a logger for the duration of a test. 532 533 Logging messages which are less severe than this level will not be captured. 534 535 .. versionchanged:: 3.4 536 The levels of the loggers changed by this function will be 537 restored to their initial values at the end of the test. 538 539 Will enable the requested logging level if it was disabled via :func:`logging.disable`. 540 541 :param level: The level. 542 :param logger: The logger to update. If not given, the root logger. 543 """ 544 logger_obj = logging.getLogger(logger) 545 # Save the original log-level to restore it during teardown. 546 self._initial_logger_levels.setdefault(logger, logger_obj.level) 547 logger_obj.setLevel(level) 548 if self._initial_handler_level is None: 549 self._initial_handler_level = self.handler.level 550 self.handler.setLevel(level) 551 initial_disabled_logging_level = self._force_enable_logging(level, logger_obj) 552 if self._initial_disabled_logging_level is None: 553 self._initial_disabled_logging_level = initial_disabled_logging_level
Set the threshold level of a logger for the duration of a test.
Logging messages which are less severe than this level will not be captured.
Changed in version 3.4: The levels of the loggers changed by this function will be restored to their initial values at the end of the test.
Will enable the requested logging level if it was disabled via logging.disable().
Parameters
- level: The level.
- logger: The logger to update. If not given, the root logger.
555 @contextmanager 556 def at_level(self, level: int | str, logger: str | None = None) -> Generator[None]: 557 """Context manager that sets the level for capturing of logs. After 558 the end of the 'with' statement the level is restored to its original 559 value. 560 561 Will enable the requested logging level if it was disabled via :func:`logging.disable`. 562 563 :param level: The level. 564 :param logger: The logger to update. If not given, the root logger. 565 """ 566 logger_obj = logging.getLogger(logger) 567 orig_level = logger_obj.level 568 logger_obj.setLevel(level) 569 handler_orig_level = self.handler.level 570 self.handler.setLevel(level) 571 original_disable_level = self._force_enable_logging(level, logger_obj) 572 try: 573 yield 574 finally: 575 logger_obj.setLevel(orig_level) 576 self.handler.setLevel(handler_orig_level) 577 logging.disable(original_disable_level)
Context manager that sets the level for capturing of logs. After the end of the 'with' statement the level is restored to its original value.
Will enable the requested logging level if it was disabled via logging.disable().
Parameters
- level: The level.
- logger: The logger to update. If not given, the root logger.
579 @contextmanager 580 def filtering(self, filter_: logging.Filter) -> Generator[None]: 581 """Context manager that temporarily adds the given filter to the caplog's 582 :meth:`handler` for the 'with' statement block, and removes that filter at the 583 end of the block. 584 585 :param filter_: A custom :class:`logging.Filter` object. 586 587 .. versionadded:: 7.5 588 """ 589 self.handler.addFilter(filter_) 590 try: 591 yield 592 finally: 593 self.handler.removeFilter(filter_)
Context manager that temporarily adds the given filter to the caplog's
handler() for the 'with' statement block, and removes that filter at the
end of the block.
Parameters
- filter_: A custom logging.Filter object.
New in version 7.5.
@final
@dataclasses.dataclass(frozen=True)
class Mark:
    """A pytest mark."""

    #: Name of the mark.
    name: str
    #: Positional arguments of the mark decorator.
    args: tuple[Any, ...]
    #: Keyword arguments of the mark decorator.
    kwargs: Mapping[str, Any]

    #: Source Mark for ids with parametrize Marks.
    _param_ids_from: Mark | None = dataclasses.field(default=None, repr=False)
    #: Resolved/generated ids with parametrize Marks.
    _param_ids_generated: Sequence[str] | None = dataclasses.field(
        default=None, repr=False
    )

    def __init__(
        self,
        name: str,
        args: tuple[Any, ...],
        kwargs: Mapping[str, Any],
        param_ids_from: Mark | None = None,
        param_ids_generated: Sequence[str] | None = None,
        *,
        _ispytest: bool = False,
    ) -> None:
        """:meta private:"""
        check_ispytest(_ispytest)
        # object.__setattr__ is required to assign fields on a frozen dataclass.
        for attr, value in (
            ("name", name),
            ("args", args),
            ("kwargs", kwargs),
            ("_param_ids_from", param_ids_from),
            ("_param_ids_generated", param_ids_generated),
        ):
            object.__setattr__(self, attr, value)

    def _has_param_ids(self) -> bool:
        # parametrize ids arrive either as the 4th positional argument or as
        # the `ids` keyword argument.
        return len(self.args) >= 4 or "ids" in self.kwargs

    def combined_with(self, other: Mark) -> Mark:
        """Return a new Mark which is a combination of this
        Mark and another Mark.

        Combines by appending args and merging kwargs.

        :param Mark other: The mark to combine with.
        :rtype: Mark
        """
        assert self.name == other.name

        # Remember which side supplied the ids for parametrize Marks;
        # `other` (the newer mark) wins when both do.
        ids_source: Mark | None = None
        if self.name == "parametrize":
            if other._has_param_ids():
                ids_source = other
            elif self._has_param_ids():
                ids_source = self

        return Mark(
            self.name,
            self.args + other.args,
            dict(self.kwargs, **other.kwargs),
            param_ids_from=ids_source,
            _ispytest=True,
        )
A pytest mark.
252 def __init__( 253 self, 254 name: str, 255 args: tuple[Any, ...], 256 kwargs: Mapping[str, Any], 257 param_ids_from: Mark | None = None, 258 param_ids_generated: Sequence[str] | None = None, 259 *, 260 _ispytest: bool = False, 261 ) -> None: 262 """:meta private:""" 263 check_ispytest(_ispytest) 264 # Weirdness to bypass frozen=True. 265 object.__setattr__(self, "name", name) 266 object.__setattr__(self, "args", args) 267 object.__setattr__(self, "kwargs", kwargs) 268 object.__setattr__(self, "_param_ids_from", param_ids_from) 269 object.__setattr__(self, "_param_ids_generated", param_ids_generated)
:meta private:
274 def combined_with(self, other: Mark) -> Mark: 275 """Return a new Mark which is a combination of this 276 Mark and another Mark. 277 278 Combines by appending args and merging kwargs. 279 280 :param Mark other: The mark to combine with. 281 :rtype: Mark 282 """ 283 assert self.name == other.name 284 285 # Remember source of ids with parametrize Marks. 286 param_ids_from: Mark | None = None 287 if self.name == "parametrize": 288 if other._has_param_ids(): 289 param_ids_from = other 290 elif self._has_param_ids(): 291 param_ids_from = self 292 293 return Mark( 294 self.name, 295 self.args + other.args, 296 dict(self.kwargs, **other.kwargs), 297 param_ids_from=param_ids_from, 298 _ispytest=True, 299 )
Return a new Mark which is a combination of this Mark and another Mark.
Combines by appending args and merging kwargs.
Parameters
- Mark other: The mark to combine with.
@dataclasses.dataclass
class MarkDecorator:
    """A decorator for applying a mark on test functions and classes.

    ``MarkDecorators`` are created with ``pytest.mark``::

        mark1 = pytest.mark.NAME  # Simple MarkDecorator
        mark2 = pytest.mark.NAME(name1=value)  # Parametrized MarkDecorator

    and can then be applied as decorators to test functions::

        @mark2
        def test_function():
            pass

    When a ``MarkDecorator`` is called, it does the following:

    1. If called with a single class as its only positional argument and no
       additional keyword arguments, it attaches the mark to the class so it
       gets applied automatically to all test cases found in that class.

    2. If called with a single function as its only positional argument and
       no additional keyword arguments, it attaches the mark to the function,
       containing all the arguments already stored internally in the
       ``MarkDecorator``.

    3. When called in any other case, it returns a new ``MarkDecorator``
       instance with the original ``MarkDecorator``'s content updated with
       the arguments passed to this call.

    Note: The rules above prevent a ``MarkDecorator`` from storing only a
    single function or class reference as its positional argument with no
    additional keyword or positional arguments. You can work around this by
    using `with_args()`.
    """

    mark: Mark

    def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None:
        """:meta private:"""
        check_ispytest(_ispytest)
        self.mark = mark

    @property
    def name(self) -> str:
        """Alias for mark.name."""
        return self.mark.name

    @property
    def args(self) -> tuple[Any, ...]:
        """Alias for mark.args."""
        return self.mark.args

    @property
    def kwargs(self) -> Mapping[str, Any]:
        """Alias for mark.kwargs."""
        return self.mark.kwargs

    @property
    def markname(self) -> str:
        """:meta private:"""
        return self.name  # for backward-compat (2.4.1 had this attr)

    def with_args(self, *args: object, **kwargs: object) -> MarkDecorator:
        """Return a MarkDecorator with extra arguments added.

        Unlike calling the MarkDecorator, with_args() can be used even
        if the sole argument is a callable/class.
        """
        extra = Mark(self.name, args, kwargs, _ispytest=True)
        return MarkDecorator(self.mark.combined_with(extra), _ispytest=True)

    # Type ignored because the overloads overlap with an incompatible
    # return type. Not much we can do about that. Thankfully mypy picks
    # the first match so it works out even if we break the rules.
    @overload
    def __call__(self, arg: Markable) -> Markable:  # type: ignore[overload-overlap]
        pass

    @overload
    def __call__(self, *args: object, **kwargs: object) -> MarkDecorator:
        pass

    def __call__(self, *args: object, **kwargs: object):
        """Call the MarkDecorator."""
        # Decorator usage: exactly one positional argument, no keywords.
        if len(args) == 1 and not kwargs:
            (target,) = args
            # For staticmethods/classmethods, the marks are eventually fetched
            # from the function object, not the descriptor, so unwrap.
            if isinstance(target, staticmethod | classmethod):
                inner = target.__func__
            else:
                inner = target
            if inspect.isclass(target) or istestfunc(inner):
                store_mark(inner, self.mark, stacklevel=3)
                return target
        # Parametrized usage: accumulate the arguments instead.
        return self.with_args(*args, **kwargs)
A decorator for applying a mark on test functions and classes.
MarkDecorators are created with pytest.mark::
mark1 = pytest.mark.NAME # Simple MarkDecorator
mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator
and can then be applied as decorators to test functions::
@mark2
def test_function():
pass
When a MarkDecorator is called, it does the following:
If called with a single class as its only positional argument and no additional keyword arguments, it attaches the mark to the class so it gets applied automatically to all test cases found in that class.
If called with a single function as its only positional argument and no additional keyword arguments, it attaches the mark to the function, containing all the arguments already stored internally in the MarkDecorator.
When called in any other case, it returns a new MarkDecorator instance with the original MarkDecorator's content updated with the arguments passed to this call.
Note: The rules above prevent a MarkDecorator from storing only a
single function or class reference as its positional argument with no
additional keyword or positional arguments. You can work around this by
using with_args().
346 def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None: 347 """:meta private:""" 348 check_ispytest(_ispytest) 349 self.mark = mark
:meta private:
356 @property 357 def args(self) -> tuple[Any, ...]: 358 """Alias for mark.args.""" 359 return self.mark.args
Alias for mark.args.
361 @property 362 def kwargs(self) -> Mapping[str, Any]: 363 """Alias for mark.kwargs.""" 364 return self.mark.kwargs
Alias for mark.kwargs.
366 @property 367 def markname(self) -> str: 368 """:meta private:""" 369 return self.name # for backward-compat (2.4.1 had this attr)
:meta private:
371 def with_args(self, *args: object, **kwargs: object) -> MarkDecorator: 372 """Return a MarkDecorator with extra arguments added. 373 374 Unlike calling the MarkDecorator, with_args() can be used even 375 if the sole argument is a callable/class. 376 """ 377 mark = Mark(self.name, args, kwargs, _ispytest=True) 378 return MarkDecorator(self.mark.combined_with(mark), _ispytest=True)
Return a MarkDecorator with extra arguments added.
Unlike calling the MarkDecorator, with_args() can be used even if the sole argument is a callable/class.
@final
class MarkGenerator:
    """Factory for :class:`MarkDecorator` objects - exposed as
    a ``pytest.mark`` singleton instance.

    Example::

        import pytest


        @pytest.mark.slowtest
        def test_function():
            pass

    applies a 'slowtest' :class:`Mark` on ``test_function``.
    """

    # See TYPE_CHECKING above.
    if TYPE_CHECKING:
        skip: _SkipMarkDecorator
        skipif: _SkipifMarkDecorator
        xfail: _XfailMarkDecorator
        parametrize: _ParametrizeMarkDecorator
        usefixtures: _UsefixturesMarkDecorator
        filterwarnings: _FilterwarningsMarkDecorator

    def __init__(self, *, _ispytest: bool = False) -> None:
        check_ispytest(_ispytest)
        self._config: Config | None = None
        # Positive cache of marker names known to be registered.
        self._markers: set[str] = set()

    def __getattr__(self, name: str) -> MarkDecorator:
        """Generate a new :class:`MarkDecorator` with the given name."""
        if name[0] == "_":
            raise AttributeError("Marker name must NOT start with underscore")

        if self._config is not None:
            # self._markers is only a performance optimisation: membership
            # proves the mark is known, but a known mark may not be cached
            # yet — so refresh the cache before deciding.
            if name not in self._markers:
                for line in self._config.getini("markers"):
                    # Example lines: "skipif(condition): skip the given test if..."
                    # or "hypothesis: tests which use Hypothesis", so the marker
                    # name is everything before the first `:` or `(`.
                    self._markers.add(line.split(":")[0].split("(")[0].strip())

            # Still unknown after refreshing the cache: warn or error out.
            if name not in self._markers:
                # Give a targeted error for common misspellings of "parametrize".
                if name in ("parameterize", "parametrise", "parameterise"):
                    __tracebackhide__ = True
                    fail(f"Unknown '{name}' mark, did you mean 'parametrize'?")

                strict = self._config.getini("strict_markers")
                if strict is None:
                    strict = self._config.getini("strict")
                if strict:
                    fail(
                        f"{name!r} not found in `markers` configuration option",
                        pytrace=False,
                    )

                warnings.warn(
                    f"Unknown pytest.mark.{name} - is this a typo? You can register "
                    "custom marks to avoid this warning - for details, see "
                    "https://docs.pytest.org/en/stable/how-to/mark.html",
                    PytestUnknownMarkWarning,
                    2,
                )

        return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True)
Factory for MarkDecorator objects - exposed as
a pytest.mark singleton instance.
Example::
import pytest
@pytest.mark.slowtest
def test_function():
pass
applies a 'slowtest' Mark on test_function.
@final
class Metafunc:
    """Objects passed to the :hook:`pytest_generate_tests` hook.

    They help to inspect a test function and to generate tests according to
    test configuration or values specified in the class or module where a
    test function is defined.
    """

    def __init__(
        self,
        definition: FunctionDefinition,
        fixtureinfo: fixtures.FuncFixtureInfo,
        config: Config,
        cls=None,
        module=None,
        *,
        _ispytest: bool = False,
    ) -> None:
        # Private constructor, enforced via the _ispytest flag.
        check_ispytest(_ispytest)

        #: Access to the underlying :class:`_pytest.python.FunctionDefinition`.
        self.definition = definition

        #: Access to the :class:`pytest.Config` object for the test session.
        self.config = config

        #: The module object where the test function is defined in.
        self.module = module

        #: Underlying Python test function.
        self.function = definition.obj

        #: Set of fixture names required by the test function.
        self.fixturenames = fixtureinfo.names_closure

        #: Class object where the test function is defined in or ``None``.
        self.cls = cls

        self._arg2fixturedefs = fixtureinfo.name2fixturedefs

        # Result of parametrize().
        self._calls: list[CallSpec2] = []

        # Maps each parametrized argname to "direct" or "indirect";
        # populated by parametrize() via _resolve_args_directness().
        self._params_directness: dict[str, Literal["indirect", "direct"]] = {}

    def parametrize(
        self,
        argnames: str | Sequence[str],
        argvalues: Iterable[ParameterSet | Sequence[object] | object],
        indirect: bool | Sequence[str] = False,
        ids: Iterable[object | None] | Callable[[Any], object | None] | None = None,
        scope: _ScopeName | None = None,
        *,
        _param_mark: Mark | None = None,
    ) -> None:
        """Add new invocations to the underlying test function using the list
        of argvalues for the given argnames. Parametrization is performed
        during the collection phase. If you need to setup expensive resources
        see about setting ``indirect`` to do it at test setup time instead.

        Can be called multiple times per test function (but only on different
        argument names), in which case each call parametrizes all previous
        parametrizations, e.g.

        ::

            unparametrized:         t
            parametrize ["x", "y"]: t[x], t[y]
            parametrize [1, 2]:     t[x-1], t[x-2], t[y-1], t[y-2]

        :param argnames:
            A comma-separated string denoting one or more argument names, or
            a list/tuple of argument strings.

        :param argvalues:
            The list of argvalues determines how often a test is invoked with
            different argument values.

            If only one argname was specified argvalues is a list of values.
            If N argnames were specified, argvalues must be a list of
            N-tuples, where each tuple-element specifies a value for its
            respective argname.

        :param indirect:
            A list of arguments' names (subset of argnames) or a boolean.
            If True the list contains all names from the argnames. Each
            argvalue corresponding to an argname in this list will
            be passed as request.param to its respective argname fixture
            function so that it can perform more expensive setups during the
            setup phase of a test rather than at collection time.

        :param ids:
            Sequence of (or generator for) ids for ``argvalues``,
            or a callable to return part of the id for each argvalue.

            With sequences (and generators like ``itertools.count()``) the
            returned ids should be of type ``string``, ``int``, ``float``,
            ``bool``, or ``None``.
            They are mapped to the corresponding index in ``argvalues``.
            ``None`` means to use the auto-generated id.

            .. versionadded:: 8.4
                :ref:`hidden-param` means to hide the parameter set
                from the test name. Can only be used at most 1 time, as
                test names need to be unique.

            If it is a callable it will be called for each entry in
            ``argvalues``, and the return value is used as part of the
            auto-generated id for the whole set (where parts are joined with
            dashes ("-")).
            This is useful to provide more specific ids for certain items, e.g.
            dates. Returning ``None`` will use an auto-generated id.

            If no ids are provided they will be generated automatically from
            the argvalues.

        :param scope:
            If specified it denotes the scope of the parameters.
            The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            to set a dynamic scope using test context or configuration.
        """
        nodeid = self.definition.nodeid

        argnames, parametersets = ParameterSet._for_parametrize(
            argnames,
            argvalues,
            self.function,
            self.config,
            nodeid=self.definition.nodeid,
        )
        del argvalues  # Only the normalized `parametersets` is used from here on.

        if "request" in argnames:
            fail(
                f"{nodeid}: 'request' is a reserved name and cannot be used in @pytest.mark.parametrize",
                pytrace=False,
            )

        if scope is not None:
            scope_ = Scope.from_user(
                scope, descr=f"parametrize() call in {self.function.__name__}"
            )
        else:
            scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)

        self._validate_if_using_arg_names(argnames, indirect)

        # Use any already (possibly) generated ids with parametrize Marks.
        if _param_mark and _param_mark._param_ids_from:
            generated_ids = _param_mark._param_ids_from._param_ids_generated
            if generated_ids is not None:
                ids = generated_ids

        ids = self._resolve_parameter_set_ids(
            argnames, ids, parametersets, nodeid=self.definition.nodeid
        )

        # Store used (possibly generated) ids with parametrize Marks.
        if _param_mark and _param_mark._param_ids_from and generated_ids is None:
            object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)

        # Calculate directness.
        arg_directness = self._resolve_args_directness(argnames, indirect)
        self._params_directness.update(arg_directness)

        # Add direct parametrizations as fixturedefs to arg2fixturedefs by
        # registering artificial "pseudo" FixtureDef's such that later at test
        # setup time we can rely on FixtureDefs to exist for all argnames.
        node = None
        # For scopes higher than function, a "pseudo" FixtureDef might have
        # already been created for the scope. We thus store and cache the
        # FixtureDef on the node related to the scope.
        if scope_ is Scope.Function:
            name2pseudofixturedef = None
        else:
            collector = self.definition.parent
            assert collector is not None
            node = get_scope_node(collector, scope_)
            if node is None:
                # If used class scope and there is no class, use module-level
                # collector (for now).
                if scope_ is Scope.Class:
                    assert isinstance(collector, Module)
                    node = collector
                # If used package scope and there is no package, use session
                # (for now).
                elif scope_ is Scope.Package:
                    node = collector.session
                else:
                    # NOTE(review): the message interpolates the user-supplied
                    # `scope`, which may be None when the scope was inferred.
                    assert False, f"Unhandled missing scope: {scope}"
            default: dict[str, FixtureDef[Any]] = {}
            name2pseudofixturedef = node.stash.setdefault(
                name2pseudofixturedef_key, default
            )
        for argname in argnames:
            if arg_directness[argname] == "indirect":
                continue
            # Reuse a cached pseudo FixtureDef for this scope if available.
            if name2pseudofixturedef is not None and argname in name2pseudofixturedef:
                fixturedef = name2pseudofixturedef[argname]
            else:
                fixturedef = FixtureDef(
                    config=self.config,
                    baseid="",
                    argname=argname,
                    func=get_direct_param_fixture_func,
                    scope=scope_,
                    params=None,
                    ids=None,
                    _ispytest=True,
                )
                if name2pseudofixturedef is not None:
                    name2pseudofixturedef[argname] = fixturedef
            self._arg2fixturedefs[argname] = [fixturedef]

        # Create the new calls: if we are parametrize() multiple times (by applying the decorator
        # more than once) then we accumulate those calls generating the cartesian product
        # of all calls.
        newcalls = []
        for callspec in self._calls or [CallSpec2()]:
            for param_index, (param_id, param_set) in enumerate(
                zip(ids, parametersets, strict=True)
            ):
                newcallspec = callspec.setmulti(
                    argnames=argnames,
                    valset=param_set.values,
                    id=param_id,
                    marks=param_set.marks,
                    scope=scope_,
                    param_index=param_index,
                    nodeid=nodeid,
                )
                newcalls.append(newcallspec)
        self._calls = newcalls

    def _resolve_parameter_set_ids(
        self,
        argnames: Sequence[str],
        ids: Iterable[object | None] | Callable[[Any], object | None] | None,
        parametersets: Sequence[ParameterSet],
        nodeid: str,
    ) -> list[str | _HiddenParam]:
        """Resolve the actual ids for the given parameter sets.

        :param argnames:
            Argument names passed to ``parametrize()``.
        :param ids:
            The `ids` parameter of the ``parametrize()`` call (see docs).
        :param parametersets:
            The parameter sets, each containing a set of values corresponding
            to ``argnames``.
        :param nodeid:
            The nodeid of the definition item that generated this
            parametrization.
        :returns:
            List with ids for each parameter set given.
        """
        if ids is None:
            idfn = None
            ids_ = None
        elif callable(ids):
            idfn = ids
            ids_ = None
        else:
            idfn = None
            ids_ = self._validate_ids(ids, parametersets, self.function.__name__)
        id_maker = IdMaker(
            argnames,
            parametersets,
            idfn,
            ids_,
            self.config,
            nodeid=nodeid,
            func_name=self.function.__name__,
        )
        return id_maker.make_unique_parameterset_ids()

    def _validate_ids(
        self,
        ids: Iterable[object | None],
        parametersets: Sequence[ParameterSet],
        func_name: str,
    ) -> list[object | None]:
        """Check that explicit `ids` match the parameter sets in count.

        Returns the ids materialized as a list; fails the collection with a
        user-facing error if the counts differ.
        """
        try:
            num_ids = len(ids)  # type: ignore[arg-type]
        except TypeError:
            # `ids` may be a generator without len(); it must at least be
            # iterable, and is then sliced to the number of parameter sets.
            try:
                iter(ids)
            except TypeError as e:
                raise TypeError("ids must be a callable or an iterable") from e
            num_ids = len(parametersets)

        # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
        if num_ids != len(parametersets) and num_ids != 0:
            msg = "In {}: {} parameter sets specified, with different number of ids: {}"
            fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False)

        return list(itertools.islice(ids, num_ids))

    def _resolve_args_directness(
        self,
        argnames: Sequence[str],
        indirect: bool | Sequence[str],
    ) -> dict[str, Literal["indirect", "direct"]]:
        """Resolve if each parametrized argument must be considered an indirect
        parameter to a fixture of the same name, or a direct parameter to the
        parametrized function, based on the ``indirect`` parameter of the
        parametrize() call.

        :param argnames:
            List of argument names passed to ``parametrize()``.
        :param indirect:
            Same as the ``indirect`` parameter of ``parametrize()``.
        :returns:
            A dict mapping each arg name to either "indirect" or "direct".
        """
        arg_directness: dict[str, Literal["indirect", "direct"]]
        if isinstance(indirect, bool):
            # A bool applies uniformly to all argnames.
            arg_directness = dict.fromkeys(
                argnames, "indirect" if indirect else "direct"
            )
        elif isinstance(indirect, Sequence):
            # A sequence marks only the listed names as indirect.
            arg_directness = dict.fromkeys(argnames, "direct")
            for arg in indirect:
                if arg not in argnames:
                    fail(
                        f"In {self.function.__name__}: indirect fixture '{arg}' doesn't exist",
                        pytrace=False,
                    )
                arg_directness[arg] = "indirect"
        else:
            fail(
                f"In {self.function.__name__}: expected Sequence or boolean"
                f" for indirect, got {type(indirect).__name__}",
                pytrace=False,
            )
        return arg_directness

    def _validate_if_using_arg_names(
        self,
        argnames: Sequence[str],
        indirect: bool | Sequence[str],
    ) -> None:
        """Check if all argnames are being used, by default values, or directly/indirectly.

        :param argnames: List of argument names passed to ``parametrize()``.
        :param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
        :raises ValueError: If validation fails.
        """
        default_arg_names = set(get_default_arg_names(self.function))
        func_name = self.function.__name__
        for arg in argnames:
            if arg not in self.fixturenames:
                if arg in default_arg_names:
                    fail(
                        f"In {func_name}: function already takes an argument '{arg}' with a default value",
                        pytrace=False,
                    )
                else:
                    if isinstance(indirect, Sequence):
                        name = "fixture" if arg in indirect else "argument"
                    else:
                        name = "fixture" if indirect else "argument"
                    fail(
                        f"In {func_name}: function uses no {name} '{arg}'",
                        pytrace=False,
                    )

    def _recompute_direct_params_indices(self) -> None:
        # Renumber the per-callspec index of every direct parameter so the
        # indices follow the (possibly filtered) order of self._calls.
        for argname, param_type in self._params_directness.items():
            if param_type == "direct":
                for i, callspec in enumerate(self._calls):
                    callspec.indices[argname] = i
Objects passed to the :hook:pytest_generate_tests hook.
They help to inspect a test function and to generate tests according to test configuration or values specified in the class or module where a test function is defined.
1176 def __init__( 1177 self, 1178 definition: FunctionDefinition, 1179 fixtureinfo: fixtures.FuncFixtureInfo, 1180 config: Config, 1181 cls=None, 1182 module=None, 1183 *, 1184 _ispytest: bool = False, 1185 ) -> None: 1186 check_ispytest(_ispytest) 1187 1188 #: Access to the underlying :class:`_pytest.python.FunctionDefinition`. 1189 self.definition = definition 1190 1191 #: Access to the :class:`pytest.Config` object for the test session. 1192 self.config = config 1193 1194 #: The module object where the test function is defined in. 1195 self.module = module 1196 1197 #: Underlying Python test function. 1198 self.function = definition.obj 1199 1200 #: Set of fixture names required by the test function. 1201 self.fixturenames = fixtureinfo.names_closure 1202 1203 #: Class object where the test function is defined in or ``None``. 1204 self.cls = cls 1205 1206 self._arg2fixturedefs = fixtureinfo.name2fixturedefs 1207 1208 # Result of parametrize(). 1209 self._calls: list[CallSpec2] = [] 1210 1211 self._params_directness: dict[str, Literal["indirect", "direct"]] = {}
    def parametrize(
        self,
        argnames: str | Sequence[str],
        argvalues: Iterable[ParameterSet | Sequence[object] | object],
        indirect: bool | Sequence[str] = False,
        ids: Iterable[object | None] | Callable[[Any], object | None] | None = None,
        scope: _ScopeName | None = None,
        *,
        _param_mark: Mark | None = None,
    ) -> None:
        """Add new invocations to the underlying test function using the list
        of argvalues for the given argnames. Parametrization is performed
        during the collection phase. If you need to setup expensive resources
        see about setting ``indirect`` to do it at test setup time instead.

        Can be called multiple times per test function (but only on different
        argument names), in which case each call parametrizes all previous
        parametrizations, e.g.

        ::

            unparametrized:         t
            parametrize ["x", "y"]: t[x], t[y]
            parametrize [1, 2]:     t[x-1], t[x-2], t[y-1], t[y-2]

        :param argnames:
            A comma-separated string denoting one or more argument names, or
            a list/tuple of argument strings.

        :param argvalues:
            The list of argvalues determines how often a test is invoked with
            different argument values.

            If only one argname was specified argvalues is a list of values.
            If N argnames were specified, argvalues must be a list of
            N-tuples, where each tuple-element specifies a value for its
            respective argname.

        :param indirect:
            A list of arguments' names (subset of argnames) or a boolean.
            If True the list contains all names from the argnames. Each
            argvalue corresponding to an argname in this list will
            be passed as request.param to its respective argname fixture
            function so that it can perform more expensive setups during the
            setup phase of a test rather than at collection time.

        :param ids:
            Sequence of (or generator for) ids for ``argvalues``,
            or a callable to return part of the id for each argvalue.

            With sequences (and generators like ``itertools.count()``) the
            returned ids should be of type ``string``, ``int``, ``float``,
            ``bool``, or ``None``.
            They are mapped to the corresponding index in ``argvalues``.
            ``None`` means to use the auto-generated id.

            .. versionadded:: 8.4
                :ref:`hidden-param` means to hide the parameter set
                from the test name. Can only be used at most 1 time, as
                test names need to be unique.

            If it is a callable it will be called for each entry in
            ``argvalues``, and the return value is used as part of the
            auto-generated id for the whole set (where parts are joined with
            dashes ("-")).
            This is useful to provide more specific ids for certain items, e.g.
            dates. Returning ``None`` will use an auto-generated id.

            If no ids are provided they will be generated automatically from
            the argvalues.

        :param scope:
            If specified it denotes the scope of the parameters.
            The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            to set a dynamic scope using test context or configuration.
        """
        nodeid = self.definition.nodeid

        argnames, parametersets = ParameterSet._for_parametrize(
            argnames,
            argvalues,
            self.function,
            self.config,
            nodeid=self.definition.nodeid,
        )
        del argvalues  # Only the normalized `parametersets` is used from here on.

        if "request" in argnames:
            fail(
                f"{nodeid}: 'request' is a reserved name and cannot be used in @pytest.mark.parametrize",
                pytrace=False,
            )

        if scope is not None:
            scope_ = Scope.from_user(
                scope, descr=f"parametrize() call in {self.function.__name__}"
            )
        else:
            scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)

        self._validate_if_using_arg_names(argnames, indirect)

        # Use any already (possibly) generated ids with parametrize Marks.
        if _param_mark and _param_mark._param_ids_from:
            generated_ids = _param_mark._param_ids_from._param_ids_generated
            if generated_ids is not None:
                ids = generated_ids

        ids = self._resolve_parameter_set_ids(
            argnames, ids, parametersets, nodeid=self.definition.nodeid
        )

        # Store used (possibly generated) ids with parametrize Marks.
        if _param_mark and _param_mark._param_ids_from and generated_ids is None:
            object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)

        # Calculate directness.
        arg_directness = self._resolve_args_directness(argnames, indirect)
        self._params_directness.update(arg_directness)

        # Add direct parametrizations as fixturedefs to arg2fixturedefs by
        # registering artificial "pseudo" FixtureDef's such that later at test
        # setup time we can rely on FixtureDefs to exist for all argnames.
        node = None
        # For scopes higher than function, a "pseudo" FixtureDef might have
        # already been created for the scope. We thus store and cache the
        # FixtureDef on the node related to the scope.
        if scope_ is Scope.Function:
            name2pseudofixturedef = None
        else:
            collector = self.definition.parent
            assert collector is not None
            node = get_scope_node(collector, scope_)
            if node is None:
                # If used class scope and there is no class, use module-level
                # collector (for now).
                if scope_ is Scope.Class:
                    assert isinstance(collector, Module)
                    node = collector
                # If used package scope and there is no package, use session
                # (for now).
                elif scope_ is Scope.Package:
                    node = collector.session
                else:
                    assert False, f"Unhandled missing scope: {scope}"
            default: dict[str, FixtureDef[Any]] = {}
            name2pseudofixturedef = node.stash.setdefault(
                name2pseudofixturedef_key, default
            )
        for argname in argnames:
            if arg_directness[argname] == "indirect":
                continue
            # Reuse a cached pseudo FixtureDef for this scope if available.
            if name2pseudofixturedef is not None and argname in name2pseudofixturedef:
                fixturedef = name2pseudofixturedef[argname]
            else:
                fixturedef = FixtureDef(
                    config=self.config,
                    baseid="",
                    argname=argname,
                    func=get_direct_param_fixture_func,
                    scope=scope_,
                    params=None,
                    ids=None,
                    _ispytest=True,
                )
                if name2pseudofixturedef is not None:
                    name2pseudofixturedef[argname] = fixturedef
            self._arg2fixturedefs[argname] = [fixturedef]

        # Create the new calls: if we are parametrize() multiple times (by applying the decorator
        # more than once) then we accumulate those calls generating the cartesian product
        # of all calls.
        newcalls = []
        for callspec in self._calls or [CallSpec2()]:
            for param_index, (param_id, param_set) in enumerate(
                zip(ids, parametersets, strict=True)
            ):
                newcallspec = callspec.setmulti(
                    argnames=argnames,
                    valset=param_set.values,
                    id=param_id,
                    marks=param_set.marks,
                    scope=scope_,
                    param_index=param_index,
                    nodeid=nodeid,
                )
                newcalls.append(newcallspec)
        self._calls = newcalls
Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
during the collection phase. If you need to setup expensive resources
see about setting indirect to do it at test setup time instead.
Can be called multiple times per test function (but only on different argument names), in which case each call parametrizes all previous parametrizations, e.g.
::
unparametrized: t
parametrize ["x", "y"]: t[x], t[y]
parametrize [1, 2]: t[x-1], t[x-2], t[y-1], t[y-2]
Parameters
argnames: A comma-separated string denoting one or more argument names, or a list/tuple of argument strings.
argvalues: The list of argvalues determines how often a test is invoked with different argument values.
If only one argname was specified argvalues is a list of values. If N argnames were specified, argvalues must be a list of N-tuples, where each tuple-element specifies a value for its respective argname.
indirect: A list of arguments' names (subset of argnames) or a boolean. If True the list contains all names from the argnames. Each argvalue corresponding to an argname in this list will be passed as request.param to its respective argname fixture function so that it can perform more expensive setups during the setup phase of a test rather than at collection time.
ids: Sequence of (or generator for) ids for
argvalues, or a callable to return part of the id for each argvalue. With sequences (and generators like
itertools.count()) the returned ids should be of type string, int, float, bool, or None. They are mapped to the corresponding index in argvalues. None means to use the auto-generated id. New in version 8.4: hidden-param means to hide the parameter set
from the test name. Can only be used at most 1 time, as test names need to be unique.
If it is a callable it will be called for each entry in
argvalues, and the return value is used as part of the auto-generated id for the whole set (where parts are joined with dashes ("-")). This is useful to provide more specific ids for certain items, e.g. dates. Returning None will use an auto-generated id. If no ids are provided they will be generated automatically from the argvalues.
scope: If specified it denotes the scope of the parameters. The scope is used for grouping tests by parameter instances. It will also override any fixture-function defined scope, allowing to set a dynamic scope using test context or configuration.
class Module(nodes.File, PyCollector):
    """Collector for test classes and functions in a Python module."""

    def _getobj(self):
        # Import self.path as a Python module object; done lazily so import
        # errors surface during collection of this node, not earlier.
        return importtestmodule(self.path, self.config)

    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
        """Collect children (items and collectors) for this collector."""
        # Register the xunit-style setup/teardown fixtures *before* parsing
        # the module for fixture factories, so they take part in normal
        # fixture ordering like any other fixture.
        self._register_setup_module_fixture()
        self._register_setup_function_fixture()
        self.session._fixturemanager.parsefactories(self)
        return super().collect()

    def _register_setup_module_fixture(self) -> None:
        """Register an autouse, module-scoped fixture for the collected module object
        that invokes setUpModule/tearDownModule if either or both are available.

        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        # Either the unittest-style or the pytest-style name may be present;
        # the first non-fixture callable found wins.
        setup_module = _get_first_non_fixture_func(
            self.obj, ("setUpModule", "setup_module")
        )
        teardown_module = _get_first_non_fixture_func(
            self.obj, ("tearDownModule", "teardown_module")
        )

        # Nothing to do if the module defines neither hook.
        if setup_module is None and teardown_module is None:
            return

        def xunit_setup_module_fixture(request) -> Generator[None]:
            module = request.module
            if setup_module is not None:
                _call_with_optional_argument(setup_module, module)
            yield
            if teardown_module is not None:
                _call_with_optional_argument(teardown_module, module)

        self.session._fixturemanager._register_fixture(
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_module_fixture_{self.obj.__name__}",
            func=xunit_setup_module_fixture,
            nodeid=self.nodeid,
            scope="module",
            autouse=True,
        )

    def _register_setup_function_fixture(self) -> None:
        """Register an autouse, function-scoped fixture for the collected module object
        that invokes setup_function/teardown_function if either or both are available.

        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",))
        teardown_function = _get_first_non_fixture_func(
            self.obj, ("teardown_function",)
        )
        if setup_function is None and teardown_function is None:
            return

        def xunit_setup_function_fixture(request) -> Generator[None]:
            if request.instance is not None:
                # in this case we are bound to an instance, so we need to let
                # setup_method handle this
                yield
                return
            function = request.function
            if setup_function is not None:
                _call_with_optional_argument(setup_function, function)
            yield
            if teardown_function is not None:
                _call_with_optional_argument(teardown_function, function)

        self.session._fixturemanager._register_fixture(
            # Use a unique name to speed up lookup.
            name=f"_xunit_setup_function_fixture_{self.obj.__name__}",
            func=xunit_setup_function_fixture,
            nodeid=self.nodeid,
            scope="function",
            autouse=True,
        )
Collector for test classes and functions in a Python module.
563 def collect(self) -> Iterable[nodes.Item | nodes.Collector]: 564 self._register_setup_module_fixture() 565 self._register_setup_function_fixture() 566 self.session._fixturemanager.parsefactories(self) 567 return super().collect()
Collect children (items and collectors) for this collector.
@final
class MonkeyPatch:
    """Helper to conveniently monkeypatch attributes/items/environment
    variables/syspath.

    Returned by the :fixture:`monkeypatch` fixture.

    .. versionchanged:: 6.2
        Can now also be used directly as `pytest.MonkeyPatch()`, for when
        the fixture is not available. In this case, use
        :meth:`with MonkeyPatch.context() as mp: <context>` or remember to call
        :meth:`undo` explicitly.
    """

    def __init__(self) -> None:
        # Undo stacks: each patch records the old value so undo() can restore
        # everything in reverse (LIFO) order.
        self._setattr: list[tuple[object, str, object]] = []
        self._setitem: list[tuple[Mapping[Any, Any], object, object]] = []
        # Saved working directory / sys.path snapshot; None means "untouched".
        self._cwd: str | None = None
        self._savesyspath: list[str] | None = None

    @classmethod
    @contextmanager
    def context(cls) -> Generator[MonkeyPatch]:
        """Context manager that returns a new :class:`MonkeyPatch` object
        which undoes any patching done inside the ``with`` block upon exit.

        Example:

        .. code-block:: python

            import functools


            def test_partial(monkeypatch):
                with monkeypatch.context() as m:
                    m.setattr(functools, "partial", 3)

        Useful in situations where it is desired to undo some patches before the test ends,
        such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
        of this see :issue:`3290`).
        """
        m = cls()
        try:
            yield m
        finally:
            # Always undo, even if the with-block raised.
            m.undo()

    @overload
    def setattr(
        self,
        target: str,
        name: object,
        value: Notset = ...,
        raising: bool = ...,
    ) -> None: ...

    @overload
    def setattr(
        self,
        target: object,
        name: str,
        value: object,
        raising: bool = ...,
    ) -> None: ...

    def setattr(
        self,
        target: str | object,
        name: object | str,
        value: object = notset,
        raising: bool = True,
    ) -> None:
        """
        Set attribute value on target, memorizing the old value.

        For example:

        .. code-block:: python

            import os

            monkeypatch.setattr(os, "getcwd", lambda: "/")

        The code above replaces the :func:`os.getcwd` function by a ``lambda`` which
        always returns ``"/"``.

        For convenience, you can specify a string as ``target`` which
        will be interpreted as a dotted import path, with the last part
        being the attribute name:

        .. code-block:: python

            monkeypatch.setattr("os.getcwd", lambda: "/")

        Raises :class:`AttributeError` if the attribute does not exist, unless
        ``raising`` is set to False.

        **Where to patch**

        ``monkeypatch.setattr`` works by (temporarily) changing the object that a name points to with another one.
        There can be many names pointing to any individual object, so for patching to work you must ensure
        that you patch the name used by the system under test.

        See the section :ref:`Where to patch <python:where-to-patch>` in the :mod:`unittest.mock`
        docs for a complete explanation, which is meant for :func:`unittest.mock.patch` but
        applies to ``monkeypatch.setattr`` as well.
        """
        __tracebackhide__ = True
        import inspect

        # Two calling conventions: setattr("dotted.path", value) where the
        # value arrives in `name`, or setattr(obj, "name", value).
        if isinstance(value, Notset):
            if not isinstance(target, str):
                raise TypeError(
                    "use setattr(target, name, value) or "
                    "setattr(target, value) with target being a dotted "
                    "import string"
                )
            value = name
            name, target = derive_importpath(target, raising)
        else:
            if not isinstance(name, str):
                raise TypeError(
                    "use setattr(target, name, value) with name being a string or "
                    "setattr(target, value) with target being a dotted "
                    "import string"
                )

        oldval = getattr(target, name, notset)
        if raising and oldval is notset:
            raise AttributeError(f"{target!r} has no attribute {name!r}")

        # avoid class descriptors like staticmethod/classmethod
        if inspect.isclass(target):
            oldval = target.__dict__.get(name, notset)
        self._setattr.append((target, name, oldval))
        setattr(target, name, value)

    def delattr(
        self,
        target: object | str,
        name: str | Notset = notset,
        raising: bool = True,
    ) -> None:
        """Delete attribute ``name`` from ``target``.

        If no ``name`` is specified and ``target`` is a string
        it will be interpreted as a dotted import path with the
        last part being the attribute name.

        Raises AttributeError if the attribute does not exist, unless
        ``raising`` is set to False.
        """
        __tracebackhide__ = True
        import inspect

        if isinstance(name, Notset):
            if not isinstance(target, str):
                raise TypeError(
                    "use delattr(target, name) or "
                    "delattr(target) with target being a dotted "
                    "import string"
                )
            name, target = derive_importpath(target, raising)

        if not hasattr(target, name):
            if raising:
                raise AttributeError(name)
        else:
            oldval = getattr(target, name, notset)
            # Avoid class descriptors like staticmethod/classmethod.
            if inspect.isclass(target):
                oldval = target.__dict__.get(name, notset)
            # Recorded on the same stack as setattr: undo() restores or
            # re-deletes depending on whether oldval is the `notset` sentinel.
            self._setattr.append((target, name, oldval))
            delattr(target, name)

    def setitem(self, dic: Mapping[K, V], name: K, value: V) -> None:
        """Set dictionary entry ``name`` to value."""
        self._setitem.append((dic, name, dic.get(name, notset)))
        # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict
        dic[name] = value  # type: ignore[index]

    def delitem(self, dic: Mapping[K, V], name: K, raising: bool = True) -> None:
        """Delete ``name`` from dict.

        Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to
        False.
        """
        if name not in dic:
            if raising:
                raise KeyError(name)
        else:
            self._setitem.append((dic, name, dic.get(name, notset)))
            # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict
            del dic[name]  # type: ignore[attr-defined]

    def setenv(self, name: str, value: str, prepend: str | None = None) -> None:
        """Set environment variable ``name`` to ``value``.

        If ``prepend`` is a character, read the current environment variable
        value and prepend the ``value`` adjoined with the ``prepend``
        character.
        """
        if not isinstance(value, str):
            # Non-str values are coerced with a warning rather than rejected.
            warnings.warn(  # type: ignore[unreachable]
                PytestWarning(
                    f"Value of environment variable {name} type should be str, but got "
                    f"{value!r} (type: {type(value).__name__}); converted to str implicitly"
                ),
                stacklevel=2,
            )
            value = str(value)
        if prepend and name in os.environ:
            value = value + prepend + os.environ[name]
        # Delegates to setitem so the change lands on the undo stack.
        self.setitem(os.environ, name, value)

    def delenv(self, name: str, raising: bool = True) -> None:
        """Delete ``name`` from the environment.

        Raises ``KeyError`` if it does not exist, unless ``raising`` is set to
        False.
        """
        environ: MutableMapping[str, str] = os.environ
        self.delitem(environ, name, raising=raising)

    def syspath_prepend(self, path) -> None:
        """Prepend ``path`` to ``sys.path`` list of import locations."""
        # Snapshot sys.path once; undo() restores the whole list.
        if self._savesyspath is None:
            self._savesyspath = sys.path[:]
        sys.path.insert(0, str(path))

        # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171
        # this is only needed when pkg_resources was already loaded by the namespace package
        if "pkg_resources" in sys.modules:
            import pkg_resources
            from pkg_resources import fixup_namespace_packages

            # Only issue deprecation warning if this call would actually have an
            # effect for this specific path.
            if (
                hasattr(pkg_resources, "_namespace_packages")
                and pkg_resources._namespace_packages
            ):
                path_obj = Path(str(path))
                for ns_pkg in pkg_resources._namespace_packages:
                    if ns_pkg is None:
                        continue
                    ns_pkg_path = path_obj / ns_pkg.replace(".", os.sep)
                    if ns_pkg_path.is_dir():
                        warnings.warn(
                            MONKEYPATCH_LEGACY_NAMESPACE_PACKAGES, stacklevel=2
                        )
                        break

            fixup_namespace_packages(str(path))

        # A call to syspathinsert() usually means that the caller wants to
        # import some dynamically created files, thus with python3 we
        # invalidate its import caches.
        # This is especially important when any namespace package is in use,
        # since then the mtime based FileFinder cache (that gets created in
        # this case already) gets not invalidated when writing the new files
        # quickly afterwards.
        from importlib import invalidate_caches

        invalidate_caches()

    def chdir(self, path: str | os.PathLike[str]) -> None:
        """Change the current working directory to the specified path.

        :param path:
            The path to change into.
        """
        # Remember only the first cwd; undo() returns there.
        if self._cwd is None:
            self._cwd = os.getcwd()
        os.chdir(path)

    def undo(self) -> None:
        """Undo previous changes.

        This call consumes the undo stack. Calling it a second time has no
        effect unless you do more monkeypatching after the undo call.

        There is generally no need to call `undo()`, since it is
        called automatically during tear-down.

        .. note::
            The same `monkeypatch` fixture is used across a
            single test function invocation. If `monkeypatch` is used both by
            the test function itself and one of the test fixtures,
            calling `undo()` will undo all of the changes made in
            both functions.

        Prefer to use :meth:`context() <pytest.MonkeyPatch.context>` instead.
        """
        # Restore attributes in reverse order of patching; the `notset`
        # sentinel means the attribute did not exist before.
        for obj, name, value in reversed(self._setattr):
            if value is not notset:
                setattr(obj, name, value)
            else:
                delattr(obj, name)
        self._setattr[:] = []
        for dictionary, key, value in reversed(self._setitem):
            if value is notset:
                try:
                    # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict
                    del dictionary[key]  # type: ignore[attr-defined]
                except KeyError:
                    pass  # Was already deleted, so we have the desired state.
            else:
                # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict
                dictionary[key] = value  # type: ignore[index]
        self._setitem[:] = []
        if self._savesyspath is not None:
            sys.path[:] = self._savesyspath
            self._savesyspath = None

        if self._cwd is not None:
            os.chdir(self._cwd)
            self._cwd = None
Helper to conveniently monkeypatch attributes/items/environment variables/syspath.
Returned by the :fixture:monkeypatch fixture.
Changed in version 6.2:
Can now also be used directly as pytest.MonkeyPatch(), for when
the fixture is not available. In this case, use
with MonkeyPatch.context() as mp: or remember to call
undo() explicitly.
139 @classmethod 140 @contextmanager 141 def context(cls) -> Generator[MonkeyPatch]: 142 """Context manager that returns a new :class:`MonkeyPatch` object 143 which undoes any patching done inside the ``with`` block upon exit. 144 145 Example: 146 147 .. code-block:: python 148 149 import functools 150 151 152 def test_partial(monkeypatch): 153 with monkeypatch.context() as m: 154 m.setattr(functools, "partial", 3) 155 156 Useful in situations where it is desired to undo some patches before the test ends, 157 such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples 158 of this see :issue:`3290`). 159 """ 160 m = cls() 161 try: 162 yield m 163 finally: 164 m.undo()
Context manager that returns a new MonkeyPatch object
which undoes any patching done inside the with block upon exit.
Example:
import functools
def test_partial(monkeypatch):
with monkeypatch.context() as m:
m.setattr(functools, "partial", 3)
Useful in situations where it is desired to undo some patches before the test ends,
such as mocking stdlib functions that might break pytest itself if mocked (for examples
of this see :issue:3290).
184 def setattr( 185 self, 186 target: str | object, 187 name: object | str, 188 value: object = notset, 189 raising: bool = True, 190 ) -> None: 191 """ 192 Set attribute value on target, memorizing the old value. 193 194 For example: 195 196 .. code-block:: python 197 198 import os 199 200 monkeypatch.setattr(os, "getcwd", lambda: "/") 201 202 The code above replaces the :func:`os.getcwd` function by a ``lambda`` which 203 always returns ``"/"``. 204 205 For convenience, you can specify a string as ``target`` which 206 will be interpreted as a dotted import path, with the last part 207 being the attribute name: 208 209 .. code-block:: python 210 211 monkeypatch.setattr("os.getcwd", lambda: "/") 212 213 Raises :class:`AttributeError` if the attribute does not exist, unless 214 ``raising`` is set to False. 215 216 **Where to patch** 217 218 ``monkeypatch.setattr`` works by (temporarily) changing the object that a name points to with another one. 219 There can be many names pointing to any individual object, so for patching to work you must ensure 220 that you patch the name used by the system under test. 221 222 See the section :ref:`Where to patch <python:where-to-patch>` in the :mod:`unittest.mock` 223 docs for a complete explanation, which is meant for :func:`unittest.mock.patch` but 224 applies to ``monkeypatch.setattr`` as well. 
225 """ 226 __tracebackhide__ = True 227 import inspect 228 229 if isinstance(value, Notset): 230 if not isinstance(target, str): 231 raise TypeError( 232 "use setattr(target, name, value) or " 233 "setattr(target, value) with target being a dotted " 234 "import string" 235 ) 236 value = name 237 name, target = derive_importpath(target, raising) 238 else: 239 if not isinstance(name, str): 240 raise TypeError( 241 "use setattr(target, name, value) with name being a string or " 242 "setattr(target, value) with target being a dotted " 243 "import string" 244 ) 245 246 oldval = getattr(target, name, notset) 247 if raising and oldval is notset: 248 raise AttributeError(f"{target!r} has no attribute {name!r}") 249 250 # avoid class descriptors like staticmethod/classmethod 251 if inspect.isclass(target): 252 oldval = target.__dict__.get(name, notset) 253 self._setattr.append((target, name, oldval)) 254 setattr(target, name, value)
Set attribute value on target, memorizing the old value.
For example:
import os
monkeypatch.setattr(os, "getcwd", lambda: "/")
The code above replaces the os.getcwd() function by a lambda which
always returns "/".
For convenience, you can specify a string as target which
will be interpreted as a dotted import path, with the last part
being the attribute name:
monkeypatch.setattr("os.getcwd", lambda: "/")
Raises AttributeError if the attribute does not exist, unless
raising is set to False.
Where to patch
monkeypatch.setattr works by (temporarily) changing the object that a name points to with another one.
There can be many names pointing to any individual object, so for patching to work you must ensure
that you patch the name used by the system under test.
See the section "Where to patch" in the unittest.mock
docs for a complete explanation, which is meant for unittest.mock.patch() but
applies to monkeypatch.setattr as well.
256 def delattr( 257 self, 258 target: object | str, 259 name: str | Notset = notset, 260 raising: bool = True, 261 ) -> None: 262 """Delete attribute ``name`` from ``target``. 263 264 If no ``name`` is specified and ``target`` is a string 265 it will be interpreted as a dotted import path with the 266 last part being the attribute name. 267 268 Raises AttributeError it the attribute does not exist, unless 269 ``raising`` is set to False. 270 """ 271 __tracebackhide__ = True 272 import inspect 273 274 if isinstance(name, Notset): 275 if not isinstance(target, str): 276 raise TypeError( 277 "use delattr(target, name) or " 278 "delattr(target) with target being a dotted " 279 "import string" 280 ) 281 name, target = derive_importpath(target, raising) 282 283 if not hasattr(target, name): 284 if raising: 285 raise AttributeError(name) 286 else: 287 oldval = getattr(target, name, notset) 288 # Avoid class descriptors like staticmethod/classmethod. 289 if inspect.isclass(target): 290 oldval = target.__dict__.get(name, notset) 291 self._setattr.append((target, name, oldval)) 292 delattr(target, name)
Delete attribute name from target.
If no name is specified and target is a string
it will be interpreted as a dotted import path with the
last part being the attribute name.
Raises AttributeError if the attribute does not exist, unless
raising is set to False.
294 def setitem(self, dic: Mapping[K, V], name: K, value: V) -> None: 295 """Set dictionary entry ``name`` to value.""" 296 self._setitem.append((dic, name, dic.get(name, notset))) 297 # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict 298 dic[name] = value # type: ignore[index]
Set dictionary entry name to value.
300 def delitem(self, dic: Mapping[K, V], name: K, raising: bool = True) -> None: 301 """Delete ``name`` from dict. 302 303 Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to 304 False. 305 """ 306 if name not in dic: 307 if raising: 308 raise KeyError(name) 309 else: 310 self._setitem.append((dic, name, dic.get(name, notset))) 311 # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict 312 del dic[name] # type: ignore[attr-defined]
Delete name from dict.
Raises KeyError if it doesn't exist, unless raising is set to
False.
314 def setenv(self, name: str, value: str, prepend: str | None = None) -> None: 315 """Set environment variable ``name`` to ``value``. 316 317 If ``prepend`` is a character, read the current environment variable 318 value and prepend the ``value`` adjoined with the ``prepend`` 319 character. 320 """ 321 if not isinstance(value, str): 322 warnings.warn( # type: ignore[unreachable] 323 PytestWarning( 324 f"Value of environment variable {name} type should be str, but got " 325 f"{value!r} (type: {type(value).__name__}); converted to str implicitly" 326 ), 327 stacklevel=2, 328 ) 329 value = str(value) 330 if prepend and name in os.environ: 331 value = value + prepend + os.environ[name] 332 self.setitem(os.environ, name, value)
Set environment variable name to value.
If prepend is a character, read the current environment variable
value and prepend the value adjoined with the prepend
character.
334 def delenv(self, name: str, raising: bool = True) -> None: 335 """Delete ``name`` from the environment. 336 337 Raises ``KeyError`` if it does not exist, unless ``raising`` is set to 338 False. 339 """ 340 environ: MutableMapping[str, str] = os.environ 341 self.delitem(environ, name, raising=raising)
Delete name from the environment.
Raises KeyError if it does not exist, unless raising is set to
False.
343 def syspath_prepend(self, path) -> None: 344 """Prepend ``path`` to ``sys.path`` list of import locations.""" 345 if self._savesyspath is None: 346 self._savesyspath = sys.path[:] 347 sys.path.insert(0, str(path)) 348 349 # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 350 # this is only needed when pkg_resources was already loaded by the namespace package 351 if "pkg_resources" in sys.modules: 352 import pkg_resources 353 from pkg_resources import fixup_namespace_packages 354 355 # Only issue deprecation warning if this call would actually have an 356 # effect for this specific path. 357 if ( 358 hasattr(pkg_resources, "_namespace_packages") 359 and pkg_resources._namespace_packages 360 ): 361 path_obj = Path(str(path)) 362 for ns_pkg in pkg_resources._namespace_packages: 363 if ns_pkg is None: 364 continue 365 ns_pkg_path = path_obj / ns_pkg.replace(".", os.sep) 366 if ns_pkg_path.is_dir(): 367 warnings.warn( 368 MONKEYPATCH_LEGACY_NAMESPACE_PACKAGES, stacklevel=2 369 ) 370 break 371 372 fixup_namespace_packages(str(path)) 373 374 # A call to syspathinsert() usually means that the caller wants to 375 # import some dynamically created files, thus with python3 we 376 # invalidate its import caches. 377 # This is especially important when any namespace package is in use, 378 # since then the mtime based FileFinder cache (that gets created in 379 # this case already) gets not invalidated when writing the new files 380 # quickly afterwards. 381 from importlib import invalidate_caches 382 383 invalidate_caches()
Prepend path to sys.path list of import locations.
385 def chdir(self, path: str | os.PathLike[str]) -> None: 386 """Change the current working directory to the specified path. 387 388 :param path: 389 The path to change into. 390 """ 391 if self._cwd is None: 392 self._cwd = os.getcwd() 393 os.chdir(path)
Change the current working directory to the specified path.
Parameters
- path: The path to change into.
395 def undo(self) -> None: 396 """Undo previous changes. 397 398 This call consumes the undo stack. Calling it a second time has no 399 effect unless you do more monkeypatching after the undo call. 400 401 There is generally no need to call `undo()`, since it is 402 called automatically during tear-down. 403 404 .. note:: 405 The same `monkeypatch` fixture is used across a 406 single test function invocation. If `monkeypatch` is used both by 407 the test function itself and one of the test fixtures, 408 calling `undo()` will undo all of the changes made in 409 both functions. 410 411 Prefer to use :meth:`context() <pytest.MonkeyPatch.context>` instead. 412 """ 413 for obj, name, value in reversed(self._setattr): 414 if value is not notset: 415 setattr(obj, name, value) 416 else: 417 delattr(obj, name) 418 self._setattr[:] = [] 419 for dictionary, key, value in reversed(self._setitem): 420 if value is notset: 421 try: 422 # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict 423 del dictionary[key] # type: ignore[attr-defined] 424 except KeyError: 425 pass # Was already deleted, so we have the desired state. 426 else: 427 # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict 428 dictionary[key] = value # type: ignore[index] 429 self._setitem[:] = [] 430 if self._savesyspath is not None: 431 sys.path[:] = self._savesyspath 432 self._savesyspath = None 433 434 if self._cwd is not None: 435 os.chdir(self._cwd) 436 self._cwd = None
Undo previous changes.
This call consumes the undo stack. Calling it a second time has no effect unless you do more monkeypatching after the undo call.
There is generally no need to call undo(), since it is
called automatically during tear-down.
The same monkeypatch fixture is used across a
single test function invocation. If monkeypatch is used both by
the test function itself and one of the test fixtures,
calling undo() will undo all of the changes made in
both functions.
Prefer to use MonkeyPatch.context() instead.
396class OptionGroup: 397 """A group of options shown in its own section.""" 398 399 def __init__( 400 self, 401 arggroup: argparse._ArgumentGroup, 402 name: str, 403 parser: Parser | None, 404 _ispytest: bool = False, 405 ) -> None: 406 check_ispytest(_ispytest) 407 self._arggroup = arggroup 408 self.name = name 409 self.options: list[Argument] = [] 410 self.parser = parser 411 412 def addoption(self, *opts: str, **attrs: Any) -> None: 413 """Add an option to this group. 414 415 If a shortened version of a long option is specified, it will 416 be suppressed in the help. ``addoption('--twowords', '--two-words')`` 417 results in help showing ``--two-words`` only, but ``--twowords`` gets 418 accepted **and** the automatic destination is in ``args.twowords``. 419 420 :param opts: 421 Option names, can be short or long options. 422 :param attrs: 423 Same attributes as the argparse library's :meth:`add_argument() 424 <argparse.ArgumentParser.add_argument>` function accepts. 425 """ 426 conflict = set(opts).intersection( 427 name for opt in self.options for name in opt.names() 428 ) 429 if conflict: 430 raise ValueError(f"option names {conflict} already added") 431 option = Argument(*opts, **attrs) 432 self._addoption_instance(option, shortupper=False) 433 434 def _addoption(self, *opts: str, **attrs: Any) -> None: 435 option = Argument(*opts, **attrs) 436 self._addoption_instance(option, shortupper=True) 437 438 def _addoption_instance(self, option: Argument, shortupper: bool = False) -> None: 439 if not shortupper: 440 for opt in option._short_opts: 441 if opt[0] == "-" and opt[1].islower(): 442 raise ValueError("lowercase shortoptions reserved") 443 444 if self.parser: 445 self.parser.processoption(option) 446 447 self._arggroup.add_argument(*option.names(), **option.attrs()) 448 self.options.append(option)
A group of options shown in its own section.
412 def addoption(self, *opts: str, **attrs: Any) -> None: 413 """Add an option to this group. 414 415 If a shortened version of a long option is specified, it will 416 be suppressed in the help. ``addoption('--twowords', '--two-words')`` 417 results in help showing ``--two-words`` only, but ``--twowords`` gets 418 accepted **and** the automatic destination is in ``args.twowords``. 419 420 :param opts: 421 Option names, can be short or long options. 422 :param attrs: 423 Same attributes as the argparse library's :meth:`add_argument() 424 <argparse.ArgumentParser.add_argument>` function accepts. 425 """ 426 conflict = set(opts).intersection( 427 name for opt in self.options for name in opt.names() 428 ) 429 if conflict: 430 raise ValueError(f"option names {conflict} already added") 431 option = Argument(*opts, **attrs) 432 self._addoption_instance(option, shortupper=False)
Add an option to this group.
If a shortened version of a long option is specified, it will
be suppressed in the help. addoption('--twowords', '--two-words')
results in help showing --two-words only, but --twowords gets
accepted and the automatic destination is in args.twowords.
Parameters
- opts: Option names, can be short or long options.
- attrs:
Same attributes as the argparse library's
argparse.ArgumentParser.add_argument() function accepts.
class Package(nodes.Directory):
    """Collector for files and directories in a Python packages -- directories
    with an `__init__.py` file.

    .. note::

        Directories without an `__init__.py` file are instead collected by
        :class:`~pytest.Dir` by default. Both are :class:`~pytest.Directory`
        collectors.

    .. versionchanged:: 8.0

        Now inherits from :class:`~pytest.Directory`.
    """

    def __init__(
        self,
        fspath: LEGACY_PATH | None,
        parent: nodes.Collector,
        # NOTE: following args are unused:
        config=None,
        session=None,
        nodeid=None,
        path: Path | None = None,
    ) -> None:
        # NOTE: Could be just the following, but kept as-is for compat.
        # super().__init__(self, fspath, parent=parent)
        session = parent.session
        super().__init__(
            fspath=fspath,
            path=path,
            parent=parent,
            config=config,
            session=session,
            nodeid=nodeid,
        )

    def setup(self) -> None:
        # Import the package's __init__.py and run its xunit-style
        # setup/teardown hooks around this collector's lifetime.
        init_mod = importtestmodule(self.path / "__init__.py", self.config)

        # Not using fixtures to call setup_module here because autouse fixtures
        # from packages are not called automatically (#4085).
        setup_module = _get_first_non_fixture_func(
            init_mod, ("setUpModule", "setup_module")
        )
        if setup_module is not None:
            _call_with_optional_argument(setup_module, init_mod)

        teardown_module = _get_first_non_fixture_func(
            init_mod, ("tearDownModule", "teardown_module")
        )
        if teardown_module is not None:
            func = partial(_call_with_optional_argument, teardown_module, init_mod)
            self.addfinalizer(func)

    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
        # Always collect __init__.py first.
        def sort_key(entry: os.DirEntry[str]) -> object:
            return (entry.name != "__init__.py", entry.name)

        config = self.config
        col: nodes.Collector | None
        cols: Sequence[nodes.Collector]
        ihook = self.ihook
        for direntry in scandir(self.path, sort_key):
            if direntry.is_dir():
                path = Path(direntry.path)
                # Initial command-line paths are never ignored; others may be
                # filtered out by the pytest_ignore_collect hook.
                if not self.session.isinitpath(path, with_parents=True):
                    if ihook.pytest_ignore_collect(collection_path=path, config=config):
                        continue
                col = ihook.pytest_collect_directory(path=path, parent=self)
                if col is not None:
                    yield col

            elif direntry.is_file():
                path = Path(direntry.path)
                if not self.session.isinitpath(path):
                    if ihook.pytest_ignore_collect(collection_path=path, config=config):
                        continue
                cols = ihook.pytest_collect_file(file_path=path, parent=self)
                yield from cols
Collector for files and directories in a Python packages -- directories
with an __init__.py file.
Directories without an __init__.py file are instead collected by
~pytest.Dir by default. Both are ~pytest.Directory
collectors.
Changed in version 8.0:
Now inherits from ~pytest.Directory.
655 def __init__( 656 self, 657 fspath: LEGACY_PATH | None, 658 parent: nodes.Collector, 659 # NOTE: following args are unused: 660 config=None, 661 session=None, 662 nodeid=None, 663 path: Path | None = None, 664 ) -> None: 665 # NOTE: Could be just the following, but kept as-is for compat. 666 # super().__init__(self, fspath, parent=parent) 667 session = parent.session 668 super().__init__( 669 fspath=fspath, 670 path=path, 671 parent=parent, 672 config=config, 673 session=session, 674 nodeid=nodeid, 675 )
677 def setup(self) -> None: 678 init_mod = importtestmodule(self.path / "__init__.py", self.config) 679 680 # Not using fixtures to call setup_module here because autouse fixtures 681 # from packages are not called automatically (#4085). 682 setup_module = _get_first_non_fixture_func( 683 init_mod, ("setUpModule", "setup_module") 684 ) 685 if setup_module is not None: 686 _call_with_optional_argument(setup_module, init_mod) 687 688 teardown_module = _get_first_non_fixture_func( 689 init_mod, ("tearDownModule", "teardown_module") 690 ) 691 if teardown_module is not None: 692 func = partial(_call_with_optional_argument, teardown_module, init_mod) 693 self.addfinalizer(func)
695 def collect(self) -> Iterable[nodes.Item | nodes.Collector]: 696 # Always collect __init__.py first. 697 def sort_key(entry: os.DirEntry[str]) -> object: 698 return (entry.name != "__init__.py", entry.name) 699 700 config = self.config 701 col: nodes.Collector | None 702 cols: Sequence[nodes.Collector] 703 ihook = self.ihook 704 for direntry in scandir(self.path, sort_key): 705 if direntry.is_dir(): 706 path = Path(direntry.path) 707 if not self.session.isinitpath(path, with_parents=True): 708 if ihook.pytest_ignore_collect(collection_path=path, config=config): 709 continue 710 col = ihook.pytest_collect_directory(path=path, parent=self) 711 if col is not None: 712 yield col 713 714 elif direntry.is_file(): 715 path = Path(direntry.path) 716 if not self.session.isinitpath(path): 717 if ihook.pytest_ignore_collect(collection_path=path, config=config): 718 continue 719 cols = ihook.pytest_collect_file(file_path=path, parent=self) 720 yield from cols
Collect children (items and collectors) for this collector.
@final
class Parser:
    """Parser for command line arguments and config-file values.

    :ivar extra_info: Dict of generic param -> value to display in case
        there's an error processing the command line arguments.
    """

    def __init__(
        self,
        usage: str | None = None,
        processopt: Callable[[Argument], None] | None = None,
        *,
        _ispytest: bool = False,
    ) -> None:
        check_ispytest(_ispytest)

        from _pytest._argcomplete import filescompleter

        self._processopt = processopt
        self.extra_info: dict[str, Any] = {}
        self.optparser = PytestArgumentParser(self, usage, self.extra_info)
        # Anonymous group collects options registered without an explicit group.
        anonymous_arggroup = self.optparser.add_argument_group("Custom options")
        self._anonymous = OptionGroup(
            anonymous_arggroup, "_anonymous", self, _ispytest=True
        )
        self._groups = [self._anonymous]
        # The positional file/dir arguments, with shell filename completion.
        file_or_dir_arg = self.optparser.add_argument(FILE_OR_DIR, nargs="*")
        file_or_dir_arg.completer = filescompleter  # type: ignore

        # Registered config-file options: name -> (help, type, default).
        self._inidict: dict[str, tuple[str, str, Any]] = {}
        # Maps alias -> canonical name.
        self._ini_aliases: dict[str, str] = {}

    @property
    def prog(self) -> str:
        # Program name shown in --help output, delegated to argparse.
        return self.optparser.prog

    @prog.setter
    def prog(self, value: str) -> None:
        self.optparser.prog = value

    def processoption(self, option: Argument) -> None:
        # Apply the user-supplied processopt callback, but only to options
        # that actually store a value (have a dest).
        if self._processopt:
            if option.dest:
                self._processopt(option)

    def getgroup(
        self, name: str, description: str = "", after: str | None = None
    ) -> OptionGroup:
        """Get (or create) a named option Group.

        :param name: Name of the option group.
        :param description: Long description for --help output.
        :param after: Name of another group, used for ordering --help output.
        :returns: The option group.

        The returned group object has an ``addoption`` method with the same
        signature as :func:`parser.addoption <pytest.Parser.addoption>` but
        will be shown in the respective group in the output of
        ``pytest --help``.
        """
        for group in self._groups:
            if group.name == name:
                return group

        arggroup = self.optparser.add_argument_group(description or name)
        group = OptionGroup(arggroup, name, self, _ispytest=True)
        # Insert right after the group named `after`; if not found, `i` ends
        # at the last index, appending the group at the end.
        i = 0
        for i, grp in enumerate(self._groups):
            if grp.name == after:
                break
        self._groups.insert(i + 1, group)
        # argparse doesn't provide a way to control `--help` order, so must
        # access its internals ☹.
        self.optparser._action_groups.insert(i + 1, self.optparser._action_groups.pop())
        return group

    def addoption(self, *opts: str, **attrs: Any) -> None:
        """Register a command line option.

        :param opts:
            Option names, can be short or long options.
        :param attrs:
            Same attributes as the argparse library's :meth:`add_argument()
            <argparse.ArgumentParser.add_argument>` function accepts.

        After command line parsing, options are available on the pytest config
        object via ``config.option.NAME`` where ``NAME`` is usually set
        by passing a ``dest`` attribute, for example
        ``addoption("--long", dest="NAME", ...)``.
        """
        self._anonymous.addoption(*opts, **attrs)

    def parse(
        self,
        args: Sequence[str | os.PathLike[str]],
        namespace: argparse.Namespace | None = None,
    ) -> argparse.Namespace:
        """Parse the arguments.

        Unlike ``parse_known_args`` and ``parse_known_and_unknown_args``,
        raises PrintHelp on `--help` and UsageError on unknown flags

        :meta private:
        """
        from _pytest._argcomplete import try_argcomplete

        try_argcomplete(self.optparser)
        strargs = [os.fspath(x) for x in args]
        if namespace is None:
            namespace = argparse.Namespace()
        try:
            # Flag consumed by PytestArgumentParser to raise PrintHelp on --help;
            # removed again afterwards so it doesn't leak into the namespace.
            namespace._raise_print_help = True
            return self.optparser.parse_intermixed_args(strargs, namespace=namespace)
        finally:
            del namespace._raise_print_help

    def parse_known_args(
        self,
        args: Sequence[str | os.PathLike[str]],
        namespace: argparse.Namespace | None = None,
    ) -> argparse.Namespace:
        """Parse the known arguments at this point.

        :returns: An argparse namespace object.
        """
        return self.parse_known_and_unknown_args(args, namespace=namespace)[0]

    def parse_known_and_unknown_args(
        self,
        args: Sequence[str | os.PathLike[str]],
        namespace: argparse.Namespace | None = None,
    ) -> tuple[argparse.Namespace, list[str]]:
        """Parse the known arguments at this point, and also return the
        remaining unknown flag arguments.

        :returns:
            A tuple containing an argparse namespace object for the known
            arguments, and a list of unknown flag arguments.
        """
        strargs = [os.fspath(x) for x in args]
        if sys.version_info < (3, 12, 8) or (3, 13) <= sys.version_info < (3, 13, 1):
            # Older argparse have a bugged parse_known_intermixed_args.
            namespace, unknown = self.optparser.parse_known_args(strargs, namespace)
            assert namespace is not None
            file_or_dir = getattr(namespace, FILE_OR_DIR)
            unknown_flags: list[str] = []
            # Leftovers starting with "-" are unknown flags; the rest are
            # positional file-or-dir arguments.
            for arg in unknown:
                (unknown_flags if arg.startswith("-") else file_or_dir).append(arg)
            return namespace, unknown_flags
        else:
            return self.optparser.parse_known_intermixed_args(strargs, namespace)

    def addini(
        self,
        name: str,
        help: str,
        type: Literal[
            "string", "paths", "pathlist", "args", "linelist", "bool", "int", "float"
        ]
        | None = None,
        default: Any = NOT_SET,
        *,
        aliases: Sequence[str] = (),
    ) -> None:
        """Register a configuration file option.

        :param name:
            Name of the configuration.
        :param type:
            Type of the configuration. Can be:

            * ``string``: a string
            * ``bool``: a boolean
            * ``args``: a list of strings, separated as in a shell
            * ``linelist``: a list of strings, separated by line breaks
            * ``paths``: a list of :class:`pathlib.Path`, separated as in a shell
            * ``pathlist``: a list of ``py.path``, separated as in a shell
            * ``int``: an integer
            * ``float``: a floating-point number

            .. versionadded:: 8.4

                The ``float`` and ``int`` types.

            For ``paths`` and ``pathlist`` types, they are considered relative to the config-file.
            In case the execution is happening without a config-file defined,
            they will be considered relative to the current working directory (for example with ``--override-ini``).

            .. versionadded:: 7.0
                The ``paths`` variable type.

            .. versionadded:: 8.1
                Use the current working directory to resolve ``paths`` and ``pathlist`` in the absence of a config-file.

            Defaults to ``string`` if ``None`` or not passed.
        :param default:
            Default value if no config-file option exists but is queried.
        :param aliases:
            Additional names by which this option can be referenced.
            Aliases resolve to the canonical name.

            .. versionadded:: 9.0
                The ``aliases`` parameter.

        The value of configuration keys can be retrieved via a call to
        :py:func:`config.getini(name) <pytest.Config.getini>`.
        """
        assert type in (
            None,
            "string",
            "paths",
            "pathlist",
            "args",
            "linelist",
            "bool",
            "int",
            "float",
        )
        if type is None:
            type = "string"
        if default is NOT_SET:
            default = get_ini_default_for_type(type)

        self._inidict[name] = (help, type, default)

        for alias in aliases:
            # Aliases may not shadow a registered option nor an existing alias.
            if alias in self._inidict:
                raise ValueError(
                    f"alias {alias!r} conflicts with existing configuration option"
                )
            if (already := self._ini_aliases.get(alias)) is not None:
                raise ValueError(f"{alias!r} is already an alias of {already!r}")
            self._ini_aliases[alias] = name
Parser for command line arguments and config-file values.
:ivar extra_info: Dict of generic param -> value to display in case there's an error processing the command line arguments.
40 def __init__( 41 self, 42 usage: str | None = None, 43 processopt: Callable[[Argument], None] | None = None, 44 *, 45 _ispytest: bool = False, 46 ) -> None: 47 check_ispytest(_ispytest) 48 49 from _pytest._argcomplete import filescompleter 50 51 self._processopt = processopt 52 self.extra_info: dict[str, Any] = {} 53 self.optparser = PytestArgumentParser(self, usage, self.extra_info) 54 anonymous_arggroup = self.optparser.add_argument_group("Custom options") 55 self._anonymous = OptionGroup( 56 anonymous_arggroup, "_anonymous", self, _ispytest=True 57 ) 58 self._groups = [self._anonymous] 59 file_or_dir_arg = self.optparser.add_argument(FILE_OR_DIR, nargs="*") 60 file_or_dir_arg.completer = filescompleter # type: ignore 61 62 self._inidict: dict[str, tuple[str, str, Any]] = {} 63 # Maps alias -> canonical name. 64 self._ini_aliases: dict[str, str] = {}
79 def getgroup( 80 self, name: str, description: str = "", after: str | None = None 81 ) -> OptionGroup: 82 """Get (or create) a named option Group. 83 84 :param name: Name of the option group. 85 :param description: Long description for --help output. 86 :param after: Name of another group, used for ordering --help output. 87 :returns: The option group. 88 89 The returned group object has an ``addoption`` method with the same 90 signature as :func:`parser.addoption <pytest.Parser.addoption>` but 91 will be shown in the respective group in the output of 92 ``pytest --help``. 93 """ 94 for group in self._groups: 95 if group.name == name: 96 return group 97 98 arggroup = self.optparser.add_argument_group(description or name) 99 group = OptionGroup(arggroup, name, self, _ispytest=True) 100 i = 0 101 for i, grp in enumerate(self._groups): 102 if grp.name == after: 103 break 104 self._groups.insert(i + 1, group) 105 # argparse doesn't provide a way to control `--help` order, so must 106 # access its internals ☹. 107 self.optparser._action_groups.insert(i + 1, self.optparser._action_groups.pop()) 108 return group
Get (or create) a named option Group.
Parameters
- name: Name of the option group.
- description: Long description for --help output.
- after: Name of another group, used for ordering --help output.
- Returns: The option group.
The returned group object has an addoption method with the same
signature as ``parser.addoption`` (see ``pytest.Parser.addoption``) but
will be shown in the respective group in the output of
pytest --help.
110 def addoption(self, *opts: str, **attrs: Any) -> None: 111 """Register a command line option. 112 113 :param opts: 114 Option names, can be short or long options. 115 :param attrs: 116 Same attributes as the argparse library's :meth:`add_argument() 117 <argparse.ArgumentParser.add_argument>` function accepts. 118 119 After command line parsing, options are available on the pytest config 120 object via ``config.option.NAME`` where ``NAME`` is usually set 121 by passing a ``dest`` attribute, for example 122 ``addoption("--long", dest="NAME", ...)``. 123 """ 124 self._anonymous.addoption(*opts, **attrs)
Register a command line option.
Parameters
- opts: Option names, can be short or long options.
- attrs:
Same attributes as the argparse library's
``add_argument()`` (``argparse.ArgumentParser.add_argument``) function accepts.
After command line parsing, options are available on the pytest config
object via config.option.NAME where NAME is usually set
by passing a dest attribute, for example
addoption("--long", dest="NAME", ...).
126 def parse( 127 self, 128 args: Sequence[str | os.PathLike[str]], 129 namespace: argparse.Namespace | None = None, 130 ) -> argparse.Namespace: 131 """Parse the arguments. 132 133 Unlike ``parse_known_args`` and ``parse_known_and_unknown_args``, 134 raises PrintHelp on `--help` and UsageError on unknown flags 135 136 :meta private: 137 """ 138 from _pytest._argcomplete import try_argcomplete 139 140 try_argcomplete(self.optparser) 141 strargs = [os.fspath(x) for x in args] 142 if namespace is None: 143 namespace = argparse.Namespace() 144 try: 145 namespace._raise_print_help = True 146 return self.optparser.parse_intermixed_args(strargs, namespace=namespace) 147 finally: 148 del namespace._raise_print_help
Parse the arguments.
Unlike parse_known_args and parse_known_and_unknown_args,
raises PrintHelp on --help and UsageError on unknown flags
:meta private:
150 def parse_known_args( 151 self, 152 args: Sequence[str | os.PathLike[str]], 153 namespace: argparse.Namespace | None = None, 154 ) -> argparse.Namespace: 155 """Parse the known arguments at this point. 156 157 :returns: An argparse namespace object. 158 """ 159 return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
Parse the known arguments at this point.
:returns: An argparse namespace object.
161 def parse_known_and_unknown_args( 162 self, 163 args: Sequence[str | os.PathLike[str]], 164 namespace: argparse.Namespace | None = None, 165 ) -> tuple[argparse.Namespace, list[str]]: 166 """Parse the known arguments at this point, and also return the 167 remaining unknown flag arguments. 168 169 :returns: 170 A tuple containing an argparse namespace object for the known 171 arguments, and a list of unknown flag arguments. 172 """ 173 strargs = [os.fspath(x) for x in args] 174 if sys.version_info < (3, 12, 8) or (3, 13) <= sys.version_info < (3, 13, 1): 175 # Older argparse have a bugged parse_known_intermixed_args. 176 namespace, unknown = self.optparser.parse_known_args(strargs, namespace) 177 assert namespace is not None 178 file_or_dir = getattr(namespace, FILE_OR_DIR) 179 unknown_flags: list[str] = [] 180 for arg in unknown: 181 (unknown_flags if arg.startswith("-") else file_or_dir).append(arg) 182 return namespace, unknown_flags 183 else: 184 return self.optparser.parse_known_intermixed_args(strargs, namespace)
Parse the known arguments at this point, and also return the remaining unknown flag arguments.
:returns: A tuple containing an argparse namespace object for the known arguments, and a list of unknown flag arguments.
def addini(
    self,
    name: str,
    help: str,
    type: Literal[
        "string", "paths", "pathlist", "args", "linelist", "bool", "int", "float"
    ]
    | None = None,
    default: Any = NOT_SET,
    *,
    aliases: Sequence[str] = (),
) -> None:
    """Register a configuration file option.

    :param name:
        Name of the configuration.
    :param type:
        Type of the configuration. Can be:

        * ``string``: a string
        * ``bool``: a boolean
        * ``args``: a list of strings, separated as in a shell
        * ``linelist``: a list of strings, separated by line breaks
        * ``paths``: a list of :class:`pathlib.Path`, separated as in a shell
        * ``pathlist``: a list of ``py.path``, separated as in a shell
        * ``int``: an integer
        * ``float``: a floating-point number

        .. versionadded:: 8.4

            The ``float`` and ``int`` types.

        For ``paths`` and ``pathlist`` types, they are considered relative to the config-file.
        In case the execution is happening without a config-file defined,
        they will be considered relative to the current working directory (for example with ``--override-ini``).

        .. versionadded:: 7.0
            The ``paths`` variable type.

        .. versionadded:: 8.1
            Use the current working directory to resolve ``paths`` and ``pathlist`` in the absence of a config-file.

        Defaults to ``string`` if ``None`` or not passed.
    :param default:
        Default value if no config-file option exists but is queried.
    :param aliases:
        Additional names by which this option can be referenced.
        Aliases resolve to the canonical name.

        .. versionadded:: 9.0
            The ``aliases`` parameter.

    The value of configuration keys can be retrieved via a call to
    :py:func:`config.getini(name) <pytest.Config.getini>`.
    """
    assert type in (
        None,
        "string",
        "paths",
        "pathlist",
        "args",
        "linelist",
        "bool",
        "int",
        "float",
    )
    if type is None:
        type = "string"
    if default is NOT_SET:
        default = get_ini_default_for_type(type)

    self._inidict[name] = (help, type, default)

    for alias in aliases:
        # An alias may not shadow a registered option name nor an alias that
        # was registered earlier (including earlier in this same call).
        if alias in self._inidict:
            raise ValueError(
                f"alias {alias!r} conflicts with existing configuration option"
            )
        if (already := self._ini_aliases.get(alias)) is not None:
            raise ValueError(f"{alias!r} is already an alias of {already!r}")
        self._ini_aliases[alias] = name
Register a configuration file option.
Parameters
- name: Name of the configuration.
type: Type of the configuration. Can be:
* ``string``: a string
* ``bool``: a boolean
* ``args``: a list of strings, separated as in a shell
* ``linelist``: a list of strings, separated by line breaks
* ``paths``: a list of ``pathlib.Path``, separated as in a shell
* ``pathlist``: a list of ``py.path``, separated as in a shell
* ``int``: an integer
* ``float``: a floating-point number

*New in version 8.4:* The ``float`` and ``int`` types.

For ``paths`` and ``pathlist`` types, they are considered relative to the config-file. In case the execution is happening without a config-file defined, they will be considered relative to the current working directory (for example with ``--override-ini``).

*New in version 7.0:* The ``paths`` variable type.

*New in version 8.1:* Use the current working directory to resolve ``paths`` and ``pathlist`` in the absence of a config-file.

Defaults to ``string`` if ``None`` or not passed.
- default: Default value if no config-file option exists but is queried.
aliases: Additional names by which this option can be referenced. Aliases resolve to the canonical name.
*New in version 9.0:* The ``aliases`` parameter.
The value of configuration keys can be retrieved via a call to
``config.getini(name)`` (see ``pytest.Config.getini``).
20from _pytest.config import PytestPluginManager
Warning emitted by the pytest assert rewrite module.
27from _pytest.fixtures import FixtureDef
Warning emitted by the cache plugin in various situations.
41from _pytest.mark import MarkGenerator
Warning emitted when pytest is not able to collect a file or symbol in a module.
34from _pytest.logging import LogCaptureFixture
Warning emitted for configuration issues.
48from _pytest.outcomes import exit
Warning class for features that will be removed in a future version.
66from _pytest.raises import RaisesGroup
Warning category used to denote experiments in pytest.
Use sparingly as the API might change or even be removed completely in a future version.
144 "PytestUnraisableExceptionWarning",
When the lsof plugin finds leaked fds.
411@final 412class PytestPluginManager(PluginManager): 413 """A :py:class:`pluggy.PluginManager <pluggy.PluginManager>` with 414 additional pytest-specific functionality: 415 416 * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and 417 ``pytest_plugins`` global variables found in plugins being loaded. 418 * ``conftest.py`` loading during start-up. 419 """ 420 421 def __init__(self) -> None: 422 from _pytest.assertion import DummyRewriteHook 423 from _pytest.assertion import RewriteHook 424 425 super().__init__("pytest") 426 427 # -- State related to local conftest plugins. 428 # All loaded conftest modules. 429 self._conftest_plugins: set[types.ModuleType] = set() 430 # All conftest modules applicable for a directory. 431 # This includes the directory's own conftest modules as well 432 # as those of its parent directories. 433 self._dirpath2confmods: dict[pathlib.Path, list[types.ModuleType]] = {} 434 # Cutoff directory above which conftests are no longer discovered. 435 self._confcutdir: pathlib.Path | None = None 436 # If set, conftest loading is skipped. 437 self._noconftest = False 438 439 # _getconftestmodules()'s call to _get_directory() causes a stat 440 # storm when it's called potentially thousands of times in a test 441 # session (#9478), often with the same path, so cache it. 442 self._get_directory = lru_cache(256)(_get_directory) 443 444 # plugins that were explicitly skipped with pytest.skip 445 # list of (module name, skip reason) 446 # previously we would issue a warning when a plugin was skipped, but 447 # since we refactored warnings as first citizens of Config, they are 448 # just stored here to be used later. 
449 self.skipped_plugins: list[tuple[str, str]] = [] 450 451 self.add_hookspecs(_pytest.hookspec) 452 self.register(self) 453 if os.environ.get("PYTEST_DEBUG"): 454 err: IO[str] = sys.stderr 455 encoding: str = getattr(err, "encoding", "utf8") 456 try: 457 err = open( 458 os.dup(err.fileno()), 459 mode=err.mode, 460 buffering=1, 461 encoding=encoding, 462 ) 463 except Exception: 464 pass 465 self.trace.root.setwriter(err.write) 466 self.enable_tracing() 467 468 # Config._consider_importhook will set a real object if required. 469 self.rewrite_hook: RewriteHook = DummyRewriteHook() 470 # Used to know when we are importing conftests after the pytest_configure stage. 471 self._configured = False 472 473 def parse_hookimpl_opts( 474 self, plugin: _PluggyPlugin, name: str 475 ) -> HookimplOpts | None: 476 """:meta private:""" 477 # pytest hooks are always prefixed with "pytest_", 478 # so we avoid accessing possibly non-readable attributes 479 # (see issue #1073). 480 if not name.startswith("pytest_"): 481 return None 482 # Ignore names which cannot be hooks. 483 if name == "pytest_plugins": 484 return None 485 486 opts = super().parse_hookimpl_opts(plugin, name) 487 if opts is not None: 488 return opts 489 490 method = getattr(plugin, name) 491 # Consider only actual functions for hooks (#3775). 492 if not inspect.isroutine(method): 493 return None 494 # Collect unmarked hooks as long as they have the `pytest_' prefix. 
495 legacy = _get_legacy_hook_marks( 496 method, "impl", ("tryfirst", "trylast", "optionalhook", "hookwrapper") 497 ) 498 return cast(HookimplOpts, legacy) 499 500 def parse_hookspec_opts(self, module_or_class, name: str) -> HookspecOpts | None: 501 """:meta private:""" 502 opts = super().parse_hookspec_opts(module_or_class, name) 503 if opts is None: 504 method = getattr(module_or_class, name) 505 if name.startswith("pytest_"): 506 legacy = _get_legacy_hook_marks( 507 method, "spec", ("firstresult", "historic") 508 ) 509 opts = cast(HookspecOpts, legacy) 510 return opts 511 512 def register(self, plugin: _PluggyPlugin, name: str | None = None) -> str | None: 513 if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: 514 warnings.warn( 515 PytestConfigWarning( 516 "{} plugin has been merged into the core, " 517 "please remove it from your requirements.".format( 518 name.replace("_", "-") 519 ) 520 ) 521 ) 522 return None 523 plugin_name = super().register(plugin, name) 524 if plugin_name is not None: 525 self.hook.pytest_plugin_registered.call_historic( 526 kwargs=dict( 527 plugin=plugin, 528 plugin_name=plugin_name, 529 manager=self, 530 ) 531 ) 532 533 if isinstance(plugin, types.ModuleType): 534 self.consider_module(plugin) 535 return plugin_name 536 537 def getplugin(self, name: str): 538 # Support deprecated naming because plugins (xdist e.g.) use it. 539 plugin: _PluggyPlugin | None = self.get_plugin(name) 540 return plugin 541 542 def hasplugin(self, name: str) -> bool: 543 """Return whether a plugin with the given name is registered.""" 544 return bool(self.get_plugin(name)) 545 546 def pytest_configure(self, config: Config) -> None: 547 """:meta private:""" 548 # XXX now that the pluginmanager exposes hookimpl(tryfirst...) 549 # we should remove tryfirst/trylast as markers. 550 config.addinivalue_line( 551 "markers", 552 "tryfirst: mark a hook implementation function such that the " 553 "plugin machinery will try to call it first/as early as possible. 
" 554 "DEPRECATED, use @pytest.hookimpl(tryfirst=True) instead.", 555 ) 556 config.addinivalue_line( 557 "markers", 558 "trylast: mark a hook implementation function such that the " 559 "plugin machinery will try to call it last/as late as possible. " 560 "DEPRECATED, use @pytest.hookimpl(trylast=True) instead.", 561 ) 562 self._configured = True 563 564 # 565 # Internal API for local conftest plugin handling. 566 # 567 def _set_initial_conftests( 568 self, 569 args: Sequence[str | pathlib.Path], 570 pyargs: bool, 571 noconftest: bool, 572 rootpath: pathlib.Path, 573 confcutdir: pathlib.Path | None, 574 invocation_dir: pathlib.Path, 575 importmode: ImportMode | str, 576 *, 577 consider_namespace_packages: bool, 578 ) -> None: 579 """Load initial conftest files given a preparsed "namespace". 580 581 As conftest files may add their own command line options which have 582 arguments ('--my-opt somepath') we might get some false positives. 583 All builtin and 3rd party plugins will have been loaded, however, so 584 common options will not confuse our logic here. 585 """ 586 self._confcutdir = ( 587 absolutepath(invocation_dir / confcutdir) if confcutdir else None 588 ) 589 self._noconftest = noconftest 590 self._using_pyargs = pyargs 591 foundanchor = False 592 for initial_path in args: 593 path = str(initial_path) 594 # remove node-id syntax 595 i = path.find("::") 596 if i != -1: 597 path = path[:i] 598 anchor = absolutepath(invocation_dir / path) 599 600 # Ensure we do not break if what appears to be an anchor 601 # is in fact a very long option (#10169, #11394). 
602 if safe_exists(anchor): 603 self._try_load_conftest( 604 anchor, 605 importmode, 606 rootpath, 607 consider_namespace_packages=consider_namespace_packages, 608 ) 609 foundanchor = True 610 if not foundanchor: 611 self._try_load_conftest( 612 invocation_dir, 613 importmode, 614 rootpath, 615 consider_namespace_packages=consider_namespace_packages, 616 ) 617 618 def _is_in_confcutdir(self, path: pathlib.Path) -> bool: 619 """Whether to consider the given path to load conftests from.""" 620 if self._confcutdir is None: 621 return True 622 # The semantics here are literally: 623 # Do not load a conftest if it is found upwards from confcut dir. 624 # But this is *not* the same as: 625 # Load only conftests from confcutdir or below. 626 # At first glance they might seem the same thing, however we do support use cases where 627 # we want to load conftests that are not found in confcutdir or below, but are found 628 # in completely different directory hierarchies like packages installed 629 # in out-of-source trees. 630 # (see #9767 for a regression where the logic was inverted). 
631 return path not in self._confcutdir.parents 632 633 def _try_load_conftest( 634 self, 635 anchor: pathlib.Path, 636 importmode: str | ImportMode, 637 rootpath: pathlib.Path, 638 *, 639 consider_namespace_packages: bool, 640 ) -> None: 641 self._loadconftestmodules( 642 anchor, 643 importmode, 644 rootpath, 645 consider_namespace_packages=consider_namespace_packages, 646 ) 647 # let's also consider test* subdirs 648 if anchor.is_dir(): 649 for x in anchor.glob("test*"): 650 if x.is_dir(): 651 self._loadconftestmodules( 652 x, 653 importmode, 654 rootpath, 655 consider_namespace_packages=consider_namespace_packages, 656 ) 657 658 def _loadconftestmodules( 659 self, 660 path: pathlib.Path, 661 importmode: str | ImportMode, 662 rootpath: pathlib.Path, 663 *, 664 consider_namespace_packages: bool, 665 ) -> None: 666 if self._noconftest: 667 return 668 669 directory = self._get_directory(path) 670 671 # Optimization: avoid repeated searches in the same directory. 672 # Assumes always called with same importmode and rootpath. 
        # NOTE(review): the enclosing method's signature begins before this
        # excerpt; the statements below finish collecting the conftest chain
        # for `directory`.
        if directory in self._dirpath2confmods:
            # Conftest modules for this directory were already collected.
            return

        clist = []
        # Walk from the outermost ancestor down to `directory` itself, so the
        # resulting list is ordered outermost-first.
        for parent in reversed((directory, *directory.parents)):
            if self._is_in_confcutdir(parent):
                conftestpath = parent / "conftest.py"
                if conftestpath.is_file():
                    mod = self._importconftest(
                        conftestpath,
                        importmode,
                        rootpath,
                        consider_namespace_packages=consider_namespace_packages,
                    )
                    clist.append(mod)
        self._dirpath2confmods[directory] = clist

    def _getconftestmodules(self, path: pathlib.Path) -> Sequence[types.ModuleType]:
        """Return the conftest modules relevant for ``path``, outermost first.

        Returns an empty tuple when none were loaded for its directory.
        """
        directory = self._get_directory(path)
        return self._dirpath2confmods.get(directory, ())

    def _rget_with_confmod(
        self,
        name: str,
        path: pathlib.Path,
    ) -> tuple[types.ModuleType, Any]:
        """Look up attribute ``name`` in the conftest modules for ``path``.

        Searches innermost (closest to ``path``) conftest first and returns
        the defining module together with the value.

        :raises KeyError: If no relevant conftest defines ``name``.
        """
        modules = self._getconftestmodules(path)
        for mod in reversed(modules):
            try:
                return mod, getattr(mod, name)
            except AttributeError:
                continue
        raise KeyError(name)

    def _importconftest(
        self,
        conftestpath: pathlib.Path,
        importmode: str | ImportMode,
        rootpath: pathlib.Path,
        *,
        consider_namespace_packages: bool,
    ) -> types.ModuleType:
        """Import (or return the already-imported) conftest module at ``conftestpath``
        and register it as a plugin."""
        # Conftest plugins are registered under their full path, so an
        # existing registration means this file was already imported.
        conftestpath_plugin_name = str(conftestpath)
        existing = self.get_plugin(conftestpath_plugin_name)
        if existing is not None:
            return cast(types.ModuleType, existing)

        # conftest.py files there are not in a Python package all have module
        # name "conftest", and thus conflict with each other. Clear the existing
        # before loading the new one, otherwise the existing one will be
        # returned from the module cache.
        pkgpath = resolve_package_path(conftestpath)
        if pkgpath is None:
            try:
                del sys.modules[conftestpath.stem]
            except KeyError:
                pass

        try:
            mod = import_path(
                conftestpath,
                mode=importmode,
                root=rootpath,
                consider_namespace_packages=consider_namespace_packages,
            )
        except Exception as e:
            assert e.__traceback__ is not None
            # Wrap so reporting can show which conftest failed to import.
            raise ConftestImportFailure(conftestpath, cause=e) from e

        self._check_non_top_pytest_plugins(mod, conftestpath)

        self._conftest_plugins.add(mod)
        dirpath = conftestpath.parent
        if dirpath in self._dirpath2confmods:
            # Attach the new module to every already-collected directory that
            # lies at or below the conftest's own directory.
            for path, mods in self._dirpath2confmods.items():
                if dirpath in path.parents or path == dirpath:
                    if mod in mods:
                        # Sanity check: a module must not be recorded twice.
                        raise AssertionError(
                            f"While trying to load conftest path {conftestpath!s}, "
                            f"found that the module {mod} is already loaded with path {mod.__file__}. "
                            "This is not supposed to happen. Please report this issue to pytest."
                        )
                    mods.append(mod)
        self.trace(f"loading conftestmodule {mod!r}")
        self.consider_conftest(mod, registration_name=conftestpath_plugin_name)
        return mod

    def _check_non_top_pytest_plugins(
        self,
        mod: types.ModuleType,
        conftestpath: pathlib.Path,
    ) -> None:
        """Fail if a non-top-level conftest declares ``pytest_plugins``.

        Allowed before configuration finishes and when running with
        ``--pyargs``; otherwise this is a hard usage error.
        """
        if (
            hasattr(mod, "pytest_plugins")
            and self._configured
            and not self._using_pyargs
        ):
            msg = (
                "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n"
                "It affects the entire test suite instead of just below the conftest as expected.\n"
                "  {}\n"
                "Please move it to a top level conftest file at the rootdir:\n"
                "  {}\n"
                "For more information, visit:\n"
                "  https://docs.pytest.org/en/stable/deprecations.html#pytest-plugins-in-non-top-level-conftest-files"
            )
            fail(msg.format(conftestpath, self._confcutdir), pytrace=False)

    #
    # API for bootstrapping plugin loading
    #
    #

    def consider_preparse(
        self, args: Sequence[str], *, exclude_only: bool = False
    ) -> None:
        """:meta private:"""
        # Scan command-line args for -p specifications before full argument
        # parsing happens.
        i = 0
        n = len(args)
        while i < n:
            opt = args[i]
            i += 1
            if isinstance(opt, str):
                if opt == "-p":
                    # Separated form: the plugin spec is the next argument.
                    try:
                        parg = args[i]
                    except IndexError:
                        return
                    i += 1
                elif opt.startswith("-p"):
                    # Combined form: -p<name>.
                    parg = opt[2:]
                else:
                    continue
                parg = parg.strip()
                # With exclude_only, handle only "no:<name>" blockers.
                if exclude_only and not parg.startswith("no:"):
                    continue
                self.consider_pluginarg(parg)

    def consider_pluginarg(self, arg: str) -> None:
        """:meta private:"""
        if arg.startswith("no:"):
            name = arg[3:]
            if name in essential_plugins:
                raise UsageError(f"plugin {name} cannot be disabled")

            if name.endswith("conftest.py"):
                raise UsageError(
                    f"Blocking conftest files using -p is not supported: -p no:{name}\n"
                    "conftest.py files are not plugins and cannot be disabled via -p.\n"
                )

            # PR #4304: remove stepwise if cacheprovider is blocked.
            if name == "cacheprovider":
                self.set_blocked("stepwise")
                self.set_blocked("pytest_stepwise")

            self.set_blocked(name)
            if not name.startswith("pytest_"):
                # Block the prefixed alias too.
                self.set_blocked("pytest_" + name)
        else:
            name = arg
            # Unblock the plugin.
            self.unblock(name)
            if not name.startswith("pytest_"):
                self.unblock("pytest_" + name)
            self.import_plugin(arg, consider_entry_points=True)

    def consider_conftest(
        self, conftestmodule: types.ModuleType, registration_name: str
    ) -> None:
        """:meta private:"""
        self.register(conftestmodule, name=registration_name)

    def consider_env(self) -> None:
        """:meta private:"""
        # PYTEST_PLUGINS may name additional plugins to import.
        self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))

    def consider_module(self, mod: types.ModuleType) -> None:
        """:meta private:"""
        # A module may request plugins via a `pytest_plugins` attribute.
        self._import_plugin_specs(getattr(mod, "pytest_plugins", []))

    def _import_plugin_specs(
        self, spec: None | types.ModuleType | str | Sequence[str]
    ) -> None:
        # Normalize the heterogeneous spec forms to a list, then import each.
        plugins = _get_plugin_specs_as_list(spec)
        for import_spec in plugins:
            self.import_plugin(import_spec)

    def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None:
        """Import a plugin with ``modname``.

        If ``consider_entry_points`` is True, entry point names are also
        considered to find a plugin.
        """
        # Most often modname refers to builtin modules, e.g. "pytester",
        # "terminal" or "capture". Those plugins are registered under their
        # basename for historic purposes but must be imported with the
        # _pytest prefix.
        assert isinstance(modname, str), (
            f"module name as text required, got {modname!r}"
        )
        if self.is_blocked(modname) or self.get_plugin(modname) is not None:
            return

        importspec = "_pytest." + modname if modname in builtin_plugins else modname
        self.rewrite_hook.mark_rewrite(importspec)

        if consider_entry_points:
            loaded = self.load_setuptools_entrypoints("pytest11", name=modname)
            if loaded:
                return

        try:
            __import__(importspec)
        except ImportError as e:
            # Re-raise with the plugin name in the message, keeping the
            # original traceback and cause.
            raise ImportError(
                f'Error importing plugin "{modname}": {e.args[0]}'
            ).with_traceback(e.__traceback__) from e

        except Skipped as e:
            # Plugins may skip themselves at import time (e.g. missing deps).
            self.skipped_plugins.append((modname, e.msg or ""))
        else:
            mod = sys.modules[importspec]
            self.register(mod, modname)
A :class:`pluggy.PluginManager` with additional pytest-specific functionality:

- Loading plugins from the command line, the ``PYTEST_PLUGINS`` environment
  variable, and ``pytest_plugins`` global variables found in plugins being loaded.
- ``conftest.py`` loading during start-up.
473 def parse_hookimpl_opts( 474 self, plugin: _PluggyPlugin, name: str 475 ) -> HookimplOpts | None: 476 """:meta private:""" 477 # pytest hooks are always prefixed with "pytest_", 478 # so we avoid accessing possibly non-readable attributes 479 # (see issue #1073). 480 if not name.startswith("pytest_"): 481 return None 482 # Ignore names which cannot be hooks. 483 if name == "pytest_plugins": 484 return None 485 486 opts = super().parse_hookimpl_opts(plugin, name) 487 if opts is not None: 488 return opts 489 490 method = getattr(plugin, name) 491 # Consider only actual functions for hooks (#3775). 492 if not inspect.isroutine(method): 493 return None 494 # Collect unmarked hooks as long as they have the `pytest_' prefix. 495 legacy = _get_legacy_hook_marks( 496 method, "impl", ("tryfirst", "trylast", "optionalhook", "hookwrapper") 497 ) 498 return cast(HookimplOpts, legacy)
:meta private:
500 def parse_hookspec_opts(self, module_or_class, name: str) -> HookspecOpts | None: 501 """:meta private:""" 502 opts = super().parse_hookspec_opts(module_or_class, name) 503 if opts is None: 504 method = getattr(module_or_class, name) 505 if name.startswith("pytest_"): 506 legacy = _get_legacy_hook_marks( 507 method, "spec", ("firstresult", "historic") 508 ) 509 opts = cast(HookspecOpts, legacy) 510 return opts
:meta private:
512 def register(self, plugin: _PluggyPlugin, name: str | None = None) -> str | None: 513 if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: 514 warnings.warn( 515 PytestConfigWarning( 516 "{} plugin has been merged into the core, " 517 "please remove it from your requirements.".format( 518 name.replace("_", "-") 519 ) 520 ) 521 ) 522 return None 523 plugin_name = super().register(plugin, name) 524 if plugin_name is not None: 525 self.hook.pytest_plugin_registered.call_historic( 526 kwargs=dict( 527 plugin=plugin, 528 plugin_name=plugin_name, 529 manager=self, 530 ) 531 ) 532 533 if isinstance(plugin, types.ModuleType): 534 self.consider_module(plugin) 535 return plugin_name
Register a plugin and return its name.

:param name:
    The name under which to register the plugin. If not specified, a
    name is generated using ``get_canonical_name()``.

:returns:
    The plugin name. If the name is blocked from registering, returns ``None``.

If the plugin is already registered, raises a :exc:`ValueError`.
542 def hasplugin(self, name: str) -> bool: 543 """Return whether a plugin with the given name is registered.""" 544 return bool(self.get_plugin(name))
Return whether a plugin with the given name is registered.
546 def pytest_configure(self, config: Config) -> None: 547 """:meta private:""" 548 # XXX now that the pluginmanager exposes hookimpl(tryfirst...) 549 # we should remove tryfirst/trylast as markers. 550 config.addinivalue_line( 551 "markers", 552 "tryfirst: mark a hook implementation function such that the " 553 "plugin machinery will try to call it first/as early as possible. " 554 "DEPRECATED, use @pytest.hookimpl(tryfirst=True) instead.", 555 ) 556 config.addinivalue_line( 557 "markers", 558 "trylast: mark a hook implementation function such that the " 559 "plugin machinery will try to call it last/as late as possible. " 560 "DEPRECATED, use @pytest.hookimpl(trylast=True) instead.", 561 ) 562 self._configured = True
:meta private:
786 def consider_preparse( 787 self, args: Sequence[str], *, exclude_only: bool = False 788 ) -> None: 789 """:meta private:""" 790 i = 0 791 n = len(args) 792 while i < n: 793 opt = args[i] 794 i += 1 795 if isinstance(opt, str): 796 if opt == "-p": 797 try: 798 parg = args[i] 799 except IndexError: 800 return 801 i += 1 802 elif opt.startswith("-p"): 803 parg = opt[2:] 804 else: 805 continue 806 parg = parg.strip() 807 if exclude_only and not parg.startswith("no:"): 808 continue 809 self.consider_pluginarg(parg)
:meta private:
811 def consider_pluginarg(self, arg: str) -> None: 812 """:meta private:""" 813 if arg.startswith("no:"): 814 name = arg[3:] 815 if name in essential_plugins: 816 raise UsageError(f"plugin {name} cannot be disabled") 817 818 if name.endswith("conftest.py"): 819 raise UsageError( 820 f"Blocking conftest files using -p is not supported: -p no:{name}\n" 821 "conftest.py files are not plugins and cannot be disabled via -p.\n" 822 ) 823 824 # PR #4304: remove stepwise if cacheprovider is blocked. 825 if name == "cacheprovider": 826 self.set_blocked("stepwise") 827 self.set_blocked("pytest_stepwise") 828 829 self.set_blocked(name) 830 if not name.startswith("pytest_"): 831 self.set_blocked("pytest_" + name) 832 else: 833 name = arg 834 # Unblock the plugin. 835 self.unblock(name) 836 if not name.startswith("pytest_"): 837 self.unblock("pytest_" + name) 838 self.import_plugin(arg, consider_entry_points=True)
:meta private:
    def consider_conftest(
        self, conftestmodule: types.ModuleType, registration_name: str
    ) -> None:
        """:meta private:"""
        # A conftest module is registered like any other plugin, keyed by its
        # path-derived registration name.
        self.register(conftestmodule, name=registration_name)
:meta private:
846 def consider_env(self) -> None: 847 """:meta private:""" 848 self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
:meta private:
850 def consider_module(self, mod: types.ModuleType) -> None: 851 """:meta private:""" 852 self._import_plugin_specs(getattr(mod, "pytest_plugins", []))
:meta private:
861 def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None: 862 """Import a plugin with ``modname``. 863 864 If ``consider_entry_points`` is True, entry point names are also 865 considered to find a plugin. 866 """ 867 # Most often modname refers to builtin modules, e.g. "pytester", 868 # "terminal" or "capture". Those plugins are registered under their 869 # basename for historic purposes but must be imported with the 870 # _pytest prefix. 871 assert isinstance(modname, str), ( 872 f"module name as text required, got {modname!r}" 873 ) 874 if self.is_blocked(modname) or self.get_plugin(modname) is not None: 875 return 876 877 importspec = "_pytest." + modname if modname in builtin_plugins else modname 878 self.rewrite_hook.mark_rewrite(importspec) 879 880 if consider_entry_points: 881 loaded = self.load_setuptools_entrypoints("pytest11", name=modname) 882 if loaded: 883 return 884 885 try: 886 __import__(importspec) 887 except ImportError as e: 888 raise ImportError( 889 f'Error importing plugin "{modname}": {e.args[0]}' 890 ).with_traceback(e.__traceback__) from e 891 892 except Skipped as e: 893 self.skipped_plugins.append((modname, e.msg or "")) 894 else: 895 mod = sys.modules[importspec] 896 self.register(mod, modname)
Import a plugin with ``modname``.

If ``consider_entry_points`` is True, entry point names are also
considered to find a plugin.
from _pytest.pytester import LineMatcher
Warning class for features that will be removed in pytest 9.
from _pytest.python import Metafunc
Warning class for features that will be removed in pytest 10.
from _pytest.warning_types import PytestCacheWarning
Warning emitted when a test function returns a value other than ``None``.
See :ref:`return-not-none` for details.
"FixtureDef",
An unhandled exception occurred in a :class:`~threading.Thread`.
Such exceptions don't propagate normally.
from _pytest.warning_types import PytestUnraisableExceptionWarning
Warning emitted on use of unknown markers.
See :ref:`mark` for details.
"CallInfo",
An unraisable exception was reported.
Unraisable exceptions are exceptions raised in :meth:`__del__ <object.__del__>`
implementations and similar situations when the exception cannot be raised
as normal.
from _pytest.config import Config
Base class for all warnings emitted by pytest.
649@final 650class Pytester: 651 """ 652 Facilities to write tests/configuration files, execute pytest in isolation, and match 653 against expected output, perfect for black-box testing of pytest plugins. 654 655 It attempts to isolate the test run from external factors as much as possible, modifying 656 the current working directory to :attr:`path` and environment variables during initialization. 657 """ 658 659 __test__ = False 660 661 CLOSE_STDIN: Final = NOTSET 662 663 class TimeoutExpired(Exception): 664 pass 665 666 def __init__( 667 self, 668 request: FixtureRequest, 669 tmp_path_factory: TempPathFactory, 670 monkeypatch: MonkeyPatch, 671 *, 672 _ispytest: bool = False, 673 ) -> None: 674 check_ispytest(_ispytest) 675 self._request = request 676 self._mod_collections: WeakKeyDictionary[Collector, list[Item | Collector]] = ( 677 WeakKeyDictionary() 678 ) 679 if request.function: 680 name: str = request.function.__name__ 681 else: 682 name = request.node.name 683 self._name = name 684 self._path: Path = tmp_path_factory.mktemp(name, numbered=True) 685 #: A list of plugins to use with :py:meth:`parseconfig` and 686 #: :py:meth:`runpytest`. Initially this is an empty list but plugins can 687 #: be added to the list. 688 #: 689 #: When running in subprocess mode, specify plugins by name (str) - adding 690 #: plugin objects directly is not supported. 691 self.plugins: list[str | _PluggyPlugin] = [] 692 self._sys_path_snapshot = SysPathsSnapshot() 693 self._sys_modules_snapshot = self.__take_sys_modules_snapshot() 694 self._request.addfinalizer(self._finalize) 695 self._method = self._request.config.getoption("--runpytest") 696 self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True) 697 698 self._monkeypatch = mp = monkeypatch 699 self.chdir() 700 mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot)) 701 # Ensure no unexpected caching via tox. 702 mp.delenv("TOX_ENV_DIR", raising=False) 703 # Discard outer pytest options. 
704 mp.delenv("PYTEST_ADDOPTS", raising=False) 705 # Ensure no user config is used. 706 tmphome = str(self.path) 707 mp.setenv("HOME", tmphome) 708 mp.setenv("USERPROFILE", tmphome) 709 # Do not use colors for inner runs by default. 710 mp.setenv("PY_COLORS", "0") 711 712 @property 713 def path(self) -> Path: 714 """Temporary directory path used to create files/run tests from, etc.""" 715 return self._path 716 717 def __repr__(self) -> str: 718 return f"<Pytester {self.path!r}>" 719 720 def _finalize(self) -> None: 721 """ 722 Clean up global state artifacts. 723 724 Some methods modify the global interpreter state and this tries to 725 clean this up. It does not remove the temporary directory however so 726 it can be looked at after the test run has finished. 727 """ 728 self._sys_modules_snapshot.restore() 729 self._sys_path_snapshot.restore() 730 731 def __take_sys_modules_snapshot(self) -> SysModulesSnapshot: 732 # Some zope modules used by twisted-related tests keep internal state 733 # and can't be deleted; we had some trouble in the past with 734 # `zope.interface` for example. 735 # 736 # Preserve readline due to https://bugs.python.org/issue41033. 737 # pexpect issues a SIGWINCH. 738 def preserve_module(name): 739 return name.startswith(("zope", "readline")) 740 741 return SysModulesSnapshot(preserve=preserve_module) 742 743 def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder: 744 """Create a new :class:`HookRecorder` for a :class:`PytestPluginManager`.""" 745 pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True) # type: ignore[attr-defined] 746 self._request.addfinalizer(reprec.finish_recording) 747 return reprec 748 749 def chdir(self) -> None: 750 """Cd into the temporary directory. 751 752 This is done automatically upon instantiation. 
753 """ 754 self._monkeypatch.chdir(self.path) 755 756 def _makefile( 757 self, 758 ext: str, 759 lines: Sequence[Any | bytes], 760 files: dict[str, str], 761 encoding: str = "utf-8", 762 ) -> Path: 763 items = list(files.items()) 764 765 if ext is None: 766 raise TypeError("ext must not be None") 767 768 if ext and not ext.startswith("."): 769 raise ValueError( 770 f"pytester.makefile expects a file extension, try .{ext} instead of {ext}" 771 ) 772 773 def to_text(s: Any | bytes) -> str: 774 return s.decode(encoding) if isinstance(s, bytes) else str(s) 775 776 if lines: 777 source = "\n".join(to_text(x) for x in lines) 778 basename = self._name 779 items.insert(0, (basename, source)) 780 781 ret = None 782 for basename, value in items: 783 p = self.path.joinpath(basename).with_suffix(ext) 784 p.parent.mkdir(parents=True, exist_ok=True) 785 source_ = Source(value) 786 source = "\n".join(to_text(line) for line in source_.lines) 787 p.write_text(source.strip(), encoding=encoding) 788 if ret is None: 789 ret = p 790 assert ret is not None 791 return ret 792 793 def makefile(self, ext: str, *args: str, **kwargs: str) -> Path: 794 r"""Create new text file(s) in the test directory. 795 796 :param ext: 797 The extension the file(s) should use, including the dot, e.g. `.py`. 798 :param args: 799 All args are treated as strings and joined using newlines. 800 The result is written as contents to the file. The name of the 801 file is based on the test function requesting this fixture. 802 :param kwargs: 803 Each keyword is the name of a file, while the value of it will 804 be written as contents of the file. 805 :returns: 806 The first created file. 807 808 Examples: 809 810 .. code-block:: python 811 812 pytester.makefile(".txt", "line1", "line2") 813 814 pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") 815 816 To create binary files, use :meth:`pathlib.Path.write_bytes` directly: 817 818 .. 
code-block:: python 819 820 filename = pytester.path.joinpath("foo.bin") 821 filename.write_bytes(b"...") 822 """ 823 return self._makefile(ext, args, kwargs) 824 825 def makeconftest(self, source: str) -> Path: 826 """Write a conftest.py file. 827 828 :param source: The contents. 829 :returns: The conftest.py file. 830 """ 831 return self.makepyfile(conftest=source) 832 833 def makeini(self, source: str) -> Path: 834 """Write a tox.ini file. 835 836 :param source: The contents. 837 :returns: The tox.ini file. 838 """ 839 return self.makefile(".ini", tox=source) 840 841 def maketoml(self, source: str) -> Path: 842 """Write a pytest.toml file. 843 844 :param source: The contents. 845 :returns: The pytest.toml file. 846 847 .. versionadded:: 9.0 848 """ 849 return self.makefile(".toml", pytest=source) 850 851 def getinicfg(self, source: str) -> SectionWrapper: 852 """Return the pytest section from the tox.ini config file.""" 853 p = self.makeini(source) 854 return IniConfig(str(p))["pytest"] 855 856 def makepyprojecttoml(self, source: str) -> Path: 857 """Write a pyproject.toml file. 858 859 :param source: The contents. 860 :returns: The pyproject.ini file. 861 862 .. versionadded:: 6.0 863 """ 864 return self.makefile(".toml", pyproject=source) 865 866 def makepyfile(self, *args, **kwargs) -> Path: 867 r"""Shortcut for .makefile() with a .py extension. 868 869 Defaults to the test name with a '.py' extension, e.g test_foobar.py, overwriting 870 existing files. 871 872 Examples: 873 874 .. code-block:: python 875 876 def test_something(pytester): 877 # Initial file is created test_something.py. 878 pytester.makepyfile("foobar") 879 # To create multiple files, pass kwargs accordingly. 880 pytester.makepyfile(custom="foobar") 881 # At this point, both 'test_something.py' & 'custom.py' exist in the test directory. 
882 883 """ 884 return self._makefile(".py", args, kwargs) 885 886 def maketxtfile(self, *args, **kwargs) -> Path: 887 r"""Shortcut for .makefile() with a .txt extension. 888 889 Defaults to the test name with a '.txt' extension, e.g test_foobar.txt, overwriting 890 existing files. 891 892 Examples: 893 894 .. code-block:: python 895 896 def test_something(pytester): 897 # Initial file is created test_something.txt. 898 pytester.maketxtfile("foobar") 899 # To create multiple files, pass kwargs accordingly. 900 pytester.maketxtfile(custom="foobar") 901 # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory. 902 903 """ 904 return self._makefile(".txt", args, kwargs) 905 906 def syspathinsert(self, path: str | os.PathLike[str] | None = None) -> None: 907 """Prepend a directory to sys.path, defaults to :attr:`path`. 908 909 This is undone automatically when this object dies at the end of each 910 test. 911 912 :param path: 913 The path. 914 """ 915 if path is None: 916 path = self.path 917 918 self._monkeypatch.syspath_prepend(str(path)) 919 920 def mkdir(self, name: str | os.PathLike[str]) -> Path: 921 """Create a new (sub)directory. 922 923 :param name: 924 The name of the directory, relative to the pytester path. 925 :returns: 926 The created directory. 927 :rtype: pathlib.Path 928 """ 929 p = self.path / name 930 p.mkdir() 931 return p 932 933 def mkpydir(self, name: str | os.PathLike[str]) -> Path: 934 """Create a new python package. 935 936 This creates a (sub)directory with an empty ``__init__.py`` file so it 937 gets recognised as a Python package. 938 """ 939 p = self.path / name 940 p.mkdir() 941 p.joinpath("__init__.py").touch() 942 return p 943 944 def copy_example(self, name: str | None = None) -> Path: 945 """Copy file from project's directory into the testdir. 946 947 :param name: 948 The name of the file to copy. 949 :return: 950 Path to the copied directory (inside ``self.path``). 
951 :rtype: pathlib.Path 952 """ 953 example_dir_ = self._request.config.getini("pytester_example_dir") 954 if example_dir_ is None: 955 raise ValueError("pytester_example_dir is unset, can't copy examples") 956 example_dir: Path = self._request.config.rootpath / example_dir_ 957 958 for extra_element in self._request.node.iter_markers("pytester_example_path"): 959 assert extra_element.args 960 example_dir = example_dir.joinpath(*extra_element.args) 961 962 if name is None: 963 func_name = self._name 964 maybe_dir = example_dir / func_name 965 maybe_file = example_dir / (func_name + ".py") 966 967 if maybe_dir.is_dir(): 968 example_path = maybe_dir 969 elif maybe_file.is_file(): 970 example_path = maybe_file 971 else: 972 raise LookupError( 973 f"{func_name} can't be found as module or package in {example_dir}" 974 ) 975 else: 976 example_path = example_dir.joinpath(name) 977 978 if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file(): 979 shutil.copytree(example_path, self.path, symlinks=True, dirs_exist_ok=True) 980 return self.path 981 elif example_path.is_file(): 982 result = self.path.joinpath(example_path.name) 983 shutil.copy(example_path, result) 984 return result 985 else: 986 raise LookupError( 987 f'example "{example_path}" is not found as a file or directory' 988 ) 989 990 def getnode(self, config: Config, arg: str | os.PathLike[str]) -> Collector | Item: 991 """Get the collection node of a file. 992 993 :param config: 994 A pytest config. 995 See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it. 996 :param arg: 997 Path to the file. 998 :returns: 999 The node. 
1000 """ 1001 session = Session.from_config(config) 1002 assert "::" not in str(arg) 1003 p = Path(os.path.abspath(arg)) 1004 config.hook.pytest_sessionstart(session=session) 1005 res = session.perform_collect([str(p)], genitems=False)[0] 1006 config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) 1007 return res 1008 1009 def getpathnode(self, path: str | os.PathLike[str]) -> Collector | Item: 1010 """Return the collection node of a file. 1011 1012 This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to 1013 create the (configured) pytest Config instance. 1014 1015 :param path: 1016 Path to the file. 1017 :returns: 1018 The node. 1019 """ 1020 path = Path(path) 1021 config = self.parseconfigure(path) 1022 session = Session.from_config(config) 1023 x = bestrelpath(session.path, path) 1024 config.hook.pytest_sessionstart(session=session) 1025 res = session.perform_collect([x], genitems=False)[0] 1026 config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) 1027 return res 1028 1029 def genitems(self, colitems: Sequence[Item | Collector]) -> list[Item]: 1030 """Generate all test items from a collection node. 1031 1032 This recurses into the collection node and returns a list of all the 1033 test items contained within. 1034 1035 :param colitems: 1036 The collection nodes. 1037 :returns: 1038 The collected items. 1039 """ 1040 session = colitems[0].session 1041 result: list[Item] = [] 1042 for colitem in colitems: 1043 result.extend(session.genitems(colitem)) 1044 return result 1045 1046 def runitem(self, source: str) -> Any: 1047 """Run the "test_func" Item. 1048 1049 The calling test instance (class containing the test method) must 1050 provide a ``.getrunner()`` method which should return a runner which 1051 can run the test protocol for a single item, e.g. 1052 ``_pytest.runner.runtestprotocol``. 
1053 """ 1054 # used from runner functional tests 1055 item = self.getitem(source) 1056 # the test class where we are called from wants to provide the runner 1057 testclassinstance = self._request.instance 1058 runner = testclassinstance.getrunner() 1059 return runner(item) 1060 1061 def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder: 1062 """Run a test module in process using ``pytest.main()``. 1063 1064 This run writes "source" into a temporary file and runs 1065 ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance 1066 for the result. 1067 1068 :param source: The source code of the test module. 1069 :param cmdlineargs: Any extra command line arguments to use. 1070 """ 1071 p = self.makepyfile(source) 1072 values = [*list(cmdlineargs), p] 1073 return self.inline_run(*values) 1074 1075 def inline_genitems(self, *args) -> tuple[list[Item], HookRecorder]: 1076 """Run ``pytest.main(['--collect-only'])`` in-process. 1077 1078 Runs the :py:func:`pytest.main` function to run all of pytest inside 1079 the test process itself like :py:meth:`inline_run`, but returns a 1080 tuple of the collected items and a :py:class:`HookRecorder` instance. 1081 """ 1082 rec = self.inline_run("--collect-only", *args) 1083 items = [x.item for x in rec.getcalls("pytest_itemcollected")] 1084 return items, rec 1085 1086 def inline_run( 1087 self, 1088 *args: str | os.PathLike[str], 1089 plugins=(), 1090 no_reraise_ctrlc: bool = False, 1091 ) -> HookRecorder: 1092 """Run ``pytest.main()`` in-process, returning a HookRecorder. 1093 1094 Runs the :py:func:`pytest.main` function to run all of pytest inside 1095 the test process itself. This means it can return a 1096 :py:class:`HookRecorder` instance which gives more detailed results 1097 from that run than can be done by matching stdout/stderr from 1098 :py:meth:`runpytest`. 1099 1100 :param args: 1101 Command line arguments to pass to :py:func:`pytest.main`. 
1102 :param plugins: 1103 Extra plugin instances the ``pytest.main()`` instance should use. 1104 :param no_reraise_ctrlc: 1105 Typically we reraise keyboard interrupts from the child run. If 1106 True, the KeyboardInterrupt exception is captured. 1107 """ 1108 from _pytest.unraisableexception import gc_collect_iterations_key 1109 1110 # (maybe a cpython bug?) the importlib cache sometimes isn't updated 1111 # properly between file creation and inline_run (especially if imports 1112 # are interspersed with file creation) 1113 importlib.invalidate_caches() 1114 1115 plugins = list(plugins) 1116 finalizers = [] 1117 try: 1118 # Any sys.module or sys.path changes done while running pytest 1119 # inline should be reverted after the test run completes to avoid 1120 # clashing with later inline tests run within the same pytest test, 1121 # e.g. just because they use matching test module names. 1122 finalizers.append(self.__take_sys_modules_snapshot().restore) 1123 finalizers.append(SysPathsSnapshot().restore) 1124 1125 # Important note: 1126 # - our tests should not leave any other references/registrations 1127 # laying around other than possibly loaded test modules 1128 # referenced from sys.modules, as nothing will clean those up 1129 # automatically 1130 1131 rec = [] 1132 1133 class PytesterHelperPlugin: 1134 @staticmethod 1135 def pytest_configure(config: Config) -> None: 1136 rec.append(self.make_hook_recorder(config.pluginmanager)) 1137 1138 # The unraisable plugin GC collect slows down inline 1139 # pytester runs too much. 1140 config.stash[gc_collect_iterations_key] = 0 1141 1142 plugins.append(PytesterHelperPlugin()) 1143 ret = main([str(x) for x in args], plugins=plugins) 1144 if len(rec) == 1: 1145 reprec = rec.pop() 1146 else: 1147 1148 class reprec: # type: ignore 1149 pass 1150 1151 reprec.ret = ret 1152 1153 # Typically we reraise keyboard interrupts from the child run 1154 # because it's our user requesting interruption of the testing. 
1155 if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc: 1156 calls = reprec.getcalls("pytest_keyboard_interrupt") 1157 if calls and calls[-1].excinfo.type == KeyboardInterrupt: 1158 raise KeyboardInterrupt() 1159 return reprec 1160 finally: 1161 for finalizer in finalizers: 1162 finalizer() 1163 1164 def runpytest_inprocess( 1165 self, *args: str | os.PathLike[str], **kwargs: Any 1166 ) -> RunResult: 1167 """Return result of running pytest in-process, providing a similar 1168 interface to what self.runpytest() provides.""" 1169 syspathinsert = kwargs.pop("syspathinsert", False) 1170 1171 if syspathinsert: 1172 self.syspathinsert() 1173 instant = timing.Instant() 1174 capture = _get_multicapture("sys") 1175 capture.start_capturing() 1176 try: 1177 try: 1178 reprec = self.inline_run(*args, **kwargs) 1179 except SystemExit as e: 1180 ret = e.args[0] 1181 try: 1182 ret = ExitCode(e.args[0]) 1183 except ValueError: 1184 pass 1185 1186 class reprec: # type: ignore 1187 ret = ret 1188 1189 except Exception: 1190 traceback.print_exc() 1191 1192 class reprec: # type: ignore 1193 ret = ExitCode(3) 1194 1195 finally: 1196 out, err = capture.readouterr() 1197 capture.stop_capturing() 1198 sys.stdout.write(out) 1199 sys.stderr.write(err) 1200 1201 assert reprec.ret is not None 1202 res = RunResult( 1203 reprec.ret, out.splitlines(), err.splitlines(), instant.elapsed().seconds 1204 ) 1205 res.reprec = reprec # type: ignore 1206 return res 1207 1208 def runpytest(self, *args: str | os.PathLike[str], **kwargs: Any) -> RunResult: 1209 """Run pytest inline or in a subprocess, depending on the command line 1210 option "--runpytest" and return a :py:class:`~pytest.RunResult`.""" 1211 new_args = self._ensure_basetemp(args) 1212 if self._method == "inprocess": 1213 return self.runpytest_inprocess(*new_args, **kwargs) 1214 elif self._method == "subprocess": 1215 return self.runpytest_subprocess(*new_args, **kwargs) 1216 raise RuntimeError(f"Unrecognized runpytest option: 
{self._method}") 1217 1218 def _ensure_basetemp( 1219 self, args: Sequence[str | os.PathLike[str]] 1220 ) -> list[str | os.PathLike[str]]: 1221 new_args = list(args) 1222 for x in new_args: 1223 if str(x).startswith("--basetemp"): 1224 break 1225 else: 1226 new_args.append( 1227 "--basetemp={}".format(self.path.parent.joinpath("basetemp")) 1228 ) 1229 return new_args 1230 1231 def parseconfig(self, *args: str | os.PathLike[str]) -> Config: 1232 """Return a new pytest :class:`pytest.Config` instance from given 1233 commandline args. 1234 1235 This invokes the pytest bootstrapping code in _pytest.config to create a 1236 new :py:class:`pytest.PytestPluginManager` and call the 1237 :hook:`pytest_cmdline_parse` hook to create a new :class:`pytest.Config` 1238 instance. 1239 1240 If :attr:`plugins` has been populated they should be plugin modules 1241 to be registered with the plugin manager. 1242 """ 1243 import _pytest.config 1244 1245 new_args = [str(x) for x in self._ensure_basetemp(args)] 1246 1247 config = _pytest.config._prepareconfig(new_args, self.plugins) 1248 # we don't know what the test will do with this half-setup config 1249 # object and thus we make sure it gets unconfigured properly in any 1250 # case (otherwise capturing could still be active, for example) 1251 self._request.addfinalizer(config._ensure_unconfigure) 1252 return config 1253 1254 def parseconfigure(self, *args: str | os.PathLike[str]) -> Config: 1255 """Return a new pytest configured Config instance. 1256 1257 Returns a new :py:class:`pytest.Config` instance like 1258 :py:meth:`parseconfig`, but also calls the :hook:`pytest_configure` 1259 hook. 1260 """ 1261 config = self.parseconfig(*args) 1262 config._do_configure() 1263 return config 1264 1265 def getitem( 1266 self, source: str | os.PathLike[str], funcname: str = "test_func" 1267 ) -> Item: 1268 """Return the test item for a test function. 
1269 1270 Writes the source to a python file and runs pytest's collection on 1271 the resulting module, returning the test item for the requested 1272 function name. 1273 1274 :param source: 1275 The module source. 1276 :param funcname: 1277 The name of the test function for which to return a test item. 1278 :returns: 1279 The test item. 1280 """ 1281 items = self.getitems(source) 1282 for item in items: 1283 if item.name == funcname: 1284 return item 1285 assert 0, f"{funcname!r} item not found in module:\n{source}\nitems: {items}" 1286 1287 def getitems(self, source: str | os.PathLike[str]) -> list[Item]: 1288 """Return all test items collected from the module. 1289 1290 Writes the source to a Python file and runs pytest's collection on 1291 the resulting module, returning all test items contained within. 1292 """ 1293 modcol = self.getmodulecol(source) 1294 return self.genitems([modcol]) 1295 1296 def getmodulecol( 1297 self, 1298 source: str | os.PathLike[str], 1299 configargs=(), 1300 *, 1301 withinit: bool = False, 1302 ): 1303 """Return the module collection node for ``source``. 1304 1305 Writes ``source`` to a file using :py:meth:`makepyfile` and then 1306 runs the pytest collection on it, returning the collection node for the 1307 test module. 1308 1309 :param source: 1310 The source code of the module to collect. 1311 1312 :param configargs: 1313 Any extra arguments to pass to :py:meth:`parseconfigure`. 1314 1315 :param withinit: 1316 Whether to also write an ``__init__.py`` file to the same 1317 directory to ensure it is a package. 
1318 """ 1319 if isinstance(source, os.PathLike): 1320 path = self.path.joinpath(source) 1321 assert not withinit, "not supported for paths" 1322 else: 1323 kw = {self._name: str(source)} 1324 path = self.makepyfile(**kw) 1325 if withinit: 1326 self.makepyfile(__init__="#") 1327 self.config = config = self.parseconfigure(path, *configargs) 1328 return self.getnode(config, path) 1329 1330 def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None: 1331 """Return the collection node for name from the module collection. 1332 1333 Searches a module collection node for a collection node matching the 1334 given name. 1335 1336 :param modcol: A module collection node; see :py:meth:`getmodulecol`. 1337 :param name: The name of the node to return. 1338 """ 1339 if modcol not in self._mod_collections: 1340 self._mod_collections[modcol] = list(modcol.collect()) 1341 for colitem in self._mod_collections[modcol]: 1342 if colitem.name == name: 1343 return colitem 1344 return None 1345 1346 def popen( 1347 self, 1348 cmdargs: Sequence[str | os.PathLike[str]], 1349 stdout: int | TextIO = subprocess.PIPE, 1350 stderr: int | TextIO = subprocess.PIPE, 1351 stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, 1352 **kw, 1353 ): 1354 """Invoke :py:class:`subprocess.Popen`. 1355 1356 Calls :py:class:`subprocess.Popen` making sure the current working 1357 directory is in ``PYTHONPATH``. 1358 1359 You probably want to use :py:meth:`run` instead. 
1360 """ 1361 env = os.environ.copy() 1362 env["PYTHONPATH"] = os.pathsep.join( 1363 filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) 1364 ) 1365 kw["env"] = env 1366 1367 if stdin is self.CLOSE_STDIN: 1368 kw["stdin"] = subprocess.PIPE 1369 elif isinstance(stdin, bytes): 1370 kw["stdin"] = subprocess.PIPE 1371 else: 1372 kw["stdin"] = stdin 1373 1374 popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) 1375 if stdin is self.CLOSE_STDIN: 1376 assert popen.stdin is not None 1377 popen.stdin.close() 1378 elif isinstance(stdin, bytes): 1379 assert popen.stdin is not None 1380 popen.stdin.write(stdin) 1381 1382 return popen 1383 1384 def run( 1385 self, 1386 *cmdargs: str | os.PathLike[str], 1387 timeout: float | None = None, 1388 stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, 1389 ) -> RunResult: 1390 """Run a command with arguments. 1391 1392 Run a process using :py:class:`subprocess.Popen` saving the stdout and 1393 stderr. 1394 1395 :param cmdargs: 1396 The sequence of arguments to pass to :py:class:`subprocess.Popen`, 1397 with path-like objects being converted to :py:class:`str` 1398 automatically. 1399 :param timeout: 1400 The period in seconds after which to timeout and raise 1401 :py:class:`Pytester.TimeoutExpired`. 1402 :param stdin: 1403 Optional standard input. 1404 1405 - If it is ``CLOSE_STDIN`` (Default), then this method calls 1406 :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and 1407 the standard input is closed immediately after the new command is 1408 started. 1409 1410 - If it is of type :py:class:`bytes`, these bytes are sent to the 1411 standard input of the command. 1412 1413 - Otherwise, it is passed through to :py:class:`subprocess.Popen`. 1414 For further information in this case, consult the document of the 1415 ``stdin`` parameter in :py:class:`subprocess.Popen`. 1416 :type stdin: _pytest.compat.NotSetType | bytes | IO[Any] | int 1417 :returns: 1418 The result. 
1419 1420 """ 1421 __tracebackhide__ = True 1422 1423 cmdargs = tuple(os.fspath(arg) for arg in cmdargs) 1424 p1 = self.path.joinpath("stdout") 1425 p2 = self.path.joinpath("stderr") 1426 print("running:", *cmdargs) 1427 print(" in:", Path.cwd()) 1428 1429 with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2: 1430 instant = timing.Instant() 1431 popen = self.popen( 1432 cmdargs, 1433 stdin=stdin, 1434 stdout=f1, 1435 stderr=f2, 1436 ) 1437 if popen.stdin is not None: 1438 popen.stdin.close() 1439 1440 def handle_timeout() -> None: 1441 __tracebackhide__ = True 1442 1443 timeout_message = f"{timeout} second timeout expired running: {cmdargs}" 1444 1445 popen.kill() 1446 popen.wait() 1447 raise self.TimeoutExpired(timeout_message) 1448 1449 if timeout is None: 1450 ret = popen.wait() 1451 else: 1452 try: 1453 ret = popen.wait(timeout) 1454 except subprocess.TimeoutExpired: 1455 handle_timeout() 1456 f1.flush() 1457 f2.flush() 1458 1459 with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2: 1460 out = f1.read().splitlines() 1461 err = f2.read().splitlines() 1462 1463 self._dump_lines(out, sys.stdout) 1464 self._dump_lines(err, sys.stderr) 1465 1466 with contextlib.suppress(ValueError): 1467 ret = ExitCode(ret) 1468 return RunResult(ret, out, err, instant.elapsed().seconds) 1469 1470 def _dump_lines(self, lines, fp): 1471 try: 1472 for line in lines: 1473 print(line, file=fp) 1474 except UnicodeEncodeError: 1475 print(f"couldn't print to {fp} because of encoding") 1476 1477 def _getpytestargs(self) -> tuple[str, ...]: 1478 return sys.executable, "-mpytest" 1479 1480 def runpython(self, script: os.PathLike[str]) -> RunResult: 1481 """Run a python script using sys.executable as interpreter.""" 1482 return self.run(sys.executable, script) 1483 1484 def runpython_c(self, command: str) -> RunResult: 1485 """Run ``python -c "command"``.""" 1486 return self.run(sys.executable, "-c", command) 1487 1488 def runpytest_subprocess( 1489 self, 
*args: str | os.PathLike[str], timeout: float | None = None 1490 ) -> RunResult: 1491 """Run pytest as a subprocess with given arguments. 1492 1493 Any plugins added to the :py:attr:`plugins` list will be added using the 1494 ``-p`` command line option. Additionally ``--basetemp`` is used to put 1495 any temporary files and directories in a numbered directory prefixed 1496 with "runpytest-" to not conflict with the normal numbered pytest 1497 location for temporary files and directories. 1498 1499 :param args: 1500 The sequence of arguments to pass to the pytest subprocess. 1501 :param timeout: 1502 The period in seconds after which to timeout and raise 1503 :py:class:`Pytester.TimeoutExpired`. 1504 :returns: 1505 The result. 1506 """ 1507 __tracebackhide__ = True 1508 p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) 1509 args = (f"--basetemp={p}", *args) 1510 for plugin in self.plugins: 1511 if not isinstance(plugin, str): 1512 raise ValueError( 1513 f"Specifying plugins as objects is not supported in pytester subprocess mode; " 1514 f"specify by name instead: {plugin}" 1515 ) 1516 args = ("-p", plugin, *args) 1517 args = self._getpytestargs() + args 1518 return self.run(*args, timeout=timeout) 1519 1520 def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn: 1521 """Run pytest using pexpect. 1522 1523 This makes sure to use the right pytest and sets up the temporary 1524 directory locations. 1525 1526 The pexpect child is returned. 1527 """ 1528 basetemp = self.path / "temp-pexpect" 1529 basetemp.mkdir(mode=0o700) 1530 invoke = " ".join(map(str, self._getpytestargs())) 1531 cmd = f"{invoke} --basetemp={basetemp} {string}" 1532 return self.spawn(cmd, expect_timeout=expect_timeout) 1533 1534 def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn: 1535 """Run a command using pexpect. 1536 1537 The pexpect child is returned. 
1538 """ 1539 pexpect = importorskip("pexpect", "3.0") 1540 if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): 1541 skip("pypy-64 bit not supported") 1542 if not hasattr(pexpect, "spawn"): 1543 skip("pexpect.spawn not available") 1544 logfile = self.path.joinpath("spawn.out").open("wb") 1545 1546 child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout) 1547 self._request.addfinalizer(logfile.close) 1548 return child
Facilities to write tests/configuration files, execute pytest in isolation, and match against expected output, perfect for black-box testing of pytest plugins.
It attempts to isolate the test run from external factors as much as possible: during
initialization it changes the current working directory to :attr:`path` and adjusts
environment variables.
    def __init__(
        self,
        request: FixtureRequest,
        tmp_path_factory: TempPathFactory,
        monkeypatch: MonkeyPatch,
        *,
        _ispytest: bool = False,
    ) -> None:
        """Initialize the helper bound to the requesting test.

        :param request: Fixture request of the test using this helper.
        :param tmp_path_factory: Factory used to create the isolated tmp dirs.
        :param monkeypatch: Used for cwd and environment isolation; its
            changes are undone automatically at test end.
        :param _ispytest: Internal-use guard; must be True.
        """
        check_ispytest(_ispytest)
        self._request = request
        # WeakKeyDictionary so cached collection lists don't keep collector
        # nodes alive beyond their natural lifetime.
        self._mod_collections: WeakKeyDictionary[Collector, list[Item | Collector]] = (
            WeakKeyDictionary()
        )
        # Prefer the test function's name; fall back to the node name when
        # the request has no function.
        if request.function:
            name: str = request.function.__name__
        else:
            name = request.node.name
        self._name = name
        self._path: Path = tmp_path_factory.mktemp(name, numbered=True)
        #: A list of plugins to use with :py:meth:`parseconfig` and
        #: :py:meth:`runpytest`. Initially this is an empty list but plugins can
        #: be added to the list.
        #:
        #: When running in subprocess mode, specify plugins by name (str) - adding
        #: plugin objects directly is not supported.
        self.plugins: list[str | _PluggyPlugin] = []
        # Snapshots taken now are restored by _finalize so inline runs can't
        # leak sys.path / sys.modules changes into later tests.
        self._sys_path_snapshot = SysPathsSnapshot()
        self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
        self._request.addfinalizer(self._finalize)
        self._method = self._request.config.getoption("--runpytest")
        self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True)

        self._monkeypatch = mp = monkeypatch
        self.chdir()
        mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot))
        # Ensure no unexpected caching via tox.
        mp.delenv("TOX_ENV_DIR", raising=False)
        # Discard outer pytest options.
        mp.delenv("PYTEST_ADDOPTS", raising=False)
        # Ensure no user config is used.
        tmphome = str(self.path)
        mp.setenv("HOME", tmphome)
        mp.setenv("USERPROFILE", tmphome)
        # Do not use colors for inner runs by default.
        mp.setenv("PY_COLORS", "0")
    @property
    def path(self) -> Path:
        """Temporary directory path used to create files/run tests from, etc.

        Created via ``tmp_path_factory`` during initialization; the helper
        also changes the current working directory into it.
        """
        return self._path
Temporary directory path used to create files/run tests from, etc.
    def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder:
        """Create a new :class:`HookRecorder` for a :class:`PytestPluginManager`.

        The recorder is also attached to the plugin manager as ``reprec``,
        and its recording is stopped automatically when the test finishes.
        """
        pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True)  # type: ignore[attr-defined]
        self._request.addfinalizer(reprec.finish_recording)
        return reprec
Create a new HookRecorder for a PytestPluginManager.
    def chdir(self) -> None:
        """Cd into the temporary directory.

        This is done automatically upon instantiation.  The change is undone
        automatically by the monkeypatch fixture at test end.
        """
        self._monkeypatch.chdir(self.path)
Cd into the temporary directory.
This is done automatically upon instantiation.
    def makefile(self, ext: str, *args: str, **kwargs: str) -> Path:
        r"""Create new text file(s) in the test directory.

        :param ext:
            The extension the file(s) should use, including the dot, e.g. `.py`.
        :param args:
            All args are treated as strings and joined using newlines.
            The result is written as contents to the file.  The name of the
            file is based on the test function requesting this fixture.
        :param kwargs:
            Each keyword is the name of a file, while the value of it will
            be written as contents of the file.
        :returns:
            The first created file.

        Examples:

        .. code-block:: python

            pytester.makefile(".txt", "line1", "line2")

            pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")

        To create binary files, use :meth:`pathlib.Path.write_bytes` directly:

        .. code-block:: python

            filename = pytester.path.joinpath("foo.bin")
            filename.write_bytes(b"...")
        """
        # Shared helper also used by makepyfile/maketxtfile.
        return self._makefile(ext, args, kwargs)
Create new text file(s) in the test directory.
Parameters
- ext:
The extension the file(s) should use, including the dot, e.g. ``.py``.
- args: All args are treated as strings and joined using newlines. The result is written as contents to the file. The name of the file is based on the test function requesting this fixture.
- kwargs: Each keyword is the name of a file, while the value of it will be written as contents of the file.
Returns: The first created file.
Examples:
pytester.makefile(".txt", "line1", "line2")
pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")
To create binary files, use pathlib.Path.write_bytes() directly:
filename = pytester.path.joinpath("foo.bin")
filename.write_bytes(b"...")
825 def makeconftest(self, source: str) -> Path: 826 """Write a conftest.py file. 827 828 :param source: The contents. 829 :returns: The conftest.py file. 830 """ 831 return self.makepyfile(conftest=source)
Write a conftest.py file.
Parameters
- source: The contents.
Returns: The conftest.py file.
833 def makeini(self, source: str) -> Path: 834 """Write a tox.ini file. 835 836 :param source: The contents. 837 :returns: The tox.ini file. 838 """ 839 return self.makefile(".ini", tox=source)
Write a tox.ini file.
Parameters
- source: The contents.
Returns: The tox.ini file.
841 def maketoml(self, source: str) -> Path: 842 """Write a pytest.toml file. 843 844 :param source: The contents. 845 :returns: The pytest.toml file. 846 847 .. versionadded:: 9.0 848 """ 849 return self.makefile(".toml", pytest=source)
Write a pytest.toml file.
Parameters
- source: The contents.
Returns: The pytest.toml file.
New in version 9.0.
851 def getinicfg(self, source: str) -> SectionWrapper: 852 """Return the pytest section from the tox.ini config file.""" 853 p = self.makeini(source) 854 return IniConfig(str(p))["pytest"]
Return the pytest section from the tox.ini config file.
    def makepyprojecttoml(self, source: str) -> Path:
        """Write a pyproject.toml file.

        :param source: The contents.
        :returns: The pyproject.toml file.

        .. versionadded:: 6.0
        """
        return self.makefile(".toml", pyproject=source)
Write a pyproject.toml file.
Parameters
- source: The contents.
Returns: The pyproject.toml file.
New in version 6.0.
    def makepyfile(self, *args, **kwargs) -> Path:
        r"""Shortcut for .makefile() with a .py extension.

        Defaults to the test name with a '.py' extension, e.g. test_foobar.py,
        overwriting existing files.

        :returns: The first created file.

        Examples:

        .. code-block:: python

            def test_something(pytester):
                # Initial file is created test_something.py.
                pytester.makepyfile("foobar")
                # To create multiple files, pass kwargs accordingly.
                pytester.makepyfile(custom="foobar")
                # At this point, both 'test_something.py' & 'custom.py' exist in the test directory.

        """
        return self._makefile(".py", args, kwargs)
Shortcut for .makefile() with a .py extension.
Defaults to the test name with a '.py' extension, e.g test_foobar.py, overwriting existing files.
Examples:
def test_something(pytester):
# Initial file is created test_something.py.
pytester.makepyfile("foobar")
# To create multiple files, pass kwargs accordingly.
pytester.makepyfile(custom="foobar")
# At this point, both 'test_something.py' & 'custom.py' exist in the test directory.
    def maketxtfile(self, *args, **kwargs) -> Path:
        r"""Shortcut for .makefile() with a .txt extension.

        Defaults to the test name with a '.txt' extension, e.g. test_foobar.txt,
        overwriting existing files.

        :returns: The first created file.

        Examples:

        .. code-block:: python

            def test_something(pytester):
                # Initial file is created test_something.txt.
                pytester.maketxtfile("foobar")
                # To create multiple files, pass kwargs accordingly.
                pytester.maketxtfile(custom="foobar")
                # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory.

        """
        return self._makefile(".txt", args, kwargs)
Shortcut for .makefile() with a .txt extension.
Defaults to the test name with a '.txt' extension, e.g test_foobar.txt, overwriting existing files.
Examples:
def test_something(pytester):
# Initial file is created test_something.txt.
pytester.maketxtfile("foobar")
# To create multiple files, pass kwargs accordingly.
pytester.maketxtfile(custom="foobar")
# At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory.
906 def syspathinsert(self, path: str | os.PathLike[str] | None = None) -> None: 907 """Prepend a directory to sys.path, defaults to :attr:`path`. 908 909 This is undone automatically when this object dies at the end of each 910 test. 911 912 :param path: 913 The path. 914 """ 915 if path is None: 916 path = self.path 917 918 self._monkeypatch.syspath_prepend(str(path))
Prepend a directory to sys.path, defaults to path.
This is undone automatically when this object dies at the end of each test.
Parameters
- path: The path.
920 def mkdir(self, name: str | os.PathLike[str]) -> Path: 921 """Create a new (sub)directory. 922 923 :param name: 924 The name of the directory, relative to the pytester path. 925 :returns: 926 The created directory. 927 :rtype: pathlib.Path 928 """ 929 p = self.path / name 930 p.mkdir() 931 return p
Create a new (sub)directory.
Parameters
- name: The name of the directory, relative to the pytester path.
Returns: The created directory.
933 def mkpydir(self, name: str | os.PathLike[str]) -> Path: 934 """Create a new python package. 935 936 This creates a (sub)directory with an empty ``__init__.py`` file so it 937 gets recognised as a Python package. 938 """ 939 p = self.path / name 940 p.mkdir() 941 p.joinpath("__init__.py").touch() 942 return p
Create a new python package.
This creates a (sub)directory with an empty __init__.py file so it
gets recognised as a Python package.
    def copy_example(self, name: str | None = None) -> Path:
        """Copy file from project's directory into the testdir.

        :param name:
            The name of the file to copy.
        :return:
            Path to the copied directory (inside ``self.path``).
        :rtype: pathlib.Path
        """
        example_dir_ = self._request.config.getini("pytester_example_dir")
        if example_dir_ is None:
            raise ValueError("pytester_example_dir is unset, can't copy examples")
        example_dir: Path = self._request.config.rootpath / example_dir_

        # "pytester_example_path" markers extend the lookup directory.
        for extra_element in self._request.node.iter_markers("pytester_example_path"):
            assert extra_element.args
            example_dir = example_dir.joinpath(*extra_element.args)

        if name is None:
            # No explicit name: look for a directory or a .py module that
            # matches the requesting test's name.
            func_name = self._name
            maybe_dir = example_dir / func_name
            maybe_file = example_dir / (func_name + ".py")

            if maybe_dir.is_dir():
                example_path = maybe_dir
            elif maybe_file.is_file():
                example_path = maybe_file
            else:
                raise LookupError(
                    f"{func_name} can't be found as module or package in {example_dir}"
                )
        else:
            example_path = example_dir.joinpath(name)

        # A directory without __init__.py is merged into self.path; a plain
        # file is copied next to it.  A directory containing __init__.py
        # matches neither branch and falls through to the LookupError.
        if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file():
            shutil.copytree(example_path, self.path, symlinks=True, dirs_exist_ok=True)
            return self.path
        elif example_path.is_file():
            result = self.path.joinpath(example_path.name)
            shutil.copy(example_path, result)
            return result
        else:
            raise LookupError(
                f'example "{example_path}" is not found as a file or directory'
            )
Copy file from project's directory into the testdir.
Parameters
- name: The name of the file to copy.
Returns
Path to the copied directory (inside ``self.path``).
    def getnode(self, config: Config, arg: str | os.PathLike[str]) -> Collector | Item:
        """Get the collection node of a file.

        :param config:
            A pytest config.
            See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it.
        :param arg:
            Path to the file.
        :returns:
            The node.
        """
        session = Session.from_config(config)
        # Node-id selection ("path::name") is not supported here.
        assert "::" not in str(arg)
        p = Path(os.path.abspath(arg))
        # Wrap collection in sessionstart/sessionfinish so plugins observe a
        # complete (if minimal) session life cycle.
        config.hook.pytest_sessionstart(session=session)
        res = session.perform_collect([str(p)], genitems=False)[0]
        config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
        return res
Get the collection node of a file.
Parameters
- config:
A pytest config.
See ``parseconfig()`` and ``parseconfigure()`` for creating it.
- arg: Path to the file.
Returns: The node.
    def getpathnode(self, path: str | os.PathLike[str]) -> Collector | Item:
        """Return the collection node of a file.

        This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
        create the (configured) pytest Config instance.

        :param path:
            Path to the file.
        :returns:
            The node.
        """
        path = Path(path)
        config = self.parseconfigure(path)
        session = Session.from_config(config)
        # Collect using a path expressed relative to the session root.
        x = bestrelpath(session.path, path)
        config.hook.pytest_sessionstart(session=session)
        res = session.perform_collect([x], genitems=False)[0]
        config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
        return res
Return the collection node of a file.
This is like getnode() but uses parseconfigure() to
create the (configured) pytest Config instance.
Parameters
- path: Path to the file.
Returns: The node.
1029 def genitems(self, colitems: Sequence[Item | Collector]) -> list[Item]: 1030 """Generate all test items from a collection node. 1031 1032 This recurses into the collection node and returns a list of all the 1033 test items contained within. 1034 1035 :param colitems: 1036 The collection nodes. 1037 :returns: 1038 The collected items. 1039 """ 1040 session = colitems[0].session 1041 result: list[Item] = [] 1042 for colitem in colitems: 1043 result.extend(session.genitems(colitem)) 1044 return result
Generate all test items from a collection node.
This recurses into the collection node and returns a list of all the test items contained within.
Parameters
- colitems: The collection nodes.
Returns: The collected items.
    def runitem(self, source: str) -> Any:
        """Run the "test_func" Item.

        The calling test instance (class containing the test method) must
        provide a ``.getrunner()`` method which should return a runner which
        can run the test protocol for a single item, e.g.
        ``_pytest.runner.runtestprotocol``.

        :param source: Source code of the module containing ``test_func``.
        """
        # used from runner functional tests
        item = self.getitem(source)
        # the test class where we are called from wants to provide the runner
        testclassinstance = self._request.instance
        runner = testclassinstance.getrunner()
        return runner(item)
Run the "test_func" Item.
The calling test instance (class containing the test method) must
provide a .getrunner() method which should return a runner which
can run the test protocol for a single item, e.g.
_pytest.runner.runtestprotocol.
1061 def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder: 1062 """Run a test module in process using ``pytest.main()``. 1063 1064 This run writes "source" into a temporary file and runs 1065 ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance 1066 for the result. 1067 1068 :param source: The source code of the test module. 1069 :param cmdlineargs: Any extra command line arguments to use. 1070 """ 1071 p = self.makepyfile(source) 1072 values = [*list(cmdlineargs), p] 1073 return self.inline_run(*values)
Run a test module in process using pytest.main().
This run writes "source" into a temporary file and runs
pytest.main() on it, returning a HookRecorder instance
for the result.
Parameters
- source: The source code of the test module.
- cmdlineargs: Any extra command line arguments to use.
1075 def inline_genitems(self, *args) -> tuple[list[Item], HookRecorder]: 1076 """Run ``pytest.main(['--collect-only'])`` in-process. 1077 1078 Runs the :py:func:`pytest.main` function to run all of pytest inside 1079 the test process itself like :py:meth:`inline_run`, but returns a 1080 tuple of the collected items and a :py:class:`HookRecorder` instance. 1081 """ 1082 rec = self.inline_run("--collect-only", *args) 1083 items = [x.item for x in rec.getcalls("pytest_itemcollected")] 1084 return items, rec
Run pytest.main(['--collect-only']) in-process.
Runs the pytest.main() function to run all of pytest inside
the test process itself like inline_run(), but returns a
tuple of the collected items and a HookRecorder instance.
    def inline_run(
        self,
        *args: str | os.PathLike[str],
        plugins=(),
        no_reraise_ctrlc: bool = False,
    ) -> HookRecorder:
        """Run ``pytest.main()`` in-process, returning a HookRecorder.

        Runs the :py:func:`pytest.main` function to run all of pytest inside
        the test process itself.  This means it can return a
        :py:class:`HookRecorder` instance which gives more detailed results
        from that run than can be done by matching stdout/stderr from
        :py:meth:`runpytest`.

        :param args:
            Command line arguments to pass to :py:func:`pytest.main`.
        :param plugins:
            Extra plugin instances the ``pytest.main()`` instance should use.
        :param no_reraise_ctrlc:
            Typically we reraise keyboard interrupts from the child run.  If
            True, the KeyboardInterrupt exception is captured.
        """
        from _pytest.unraisableexception import gc_collect_iterations_key

        # (maybe a cpython bug?) the importlib cache sometimes isn't updated
        # properly between file creation and inline_run (especially if imports
        # are interspersed with file creation)
        importlib.invalidate_caches()

        plugins = list(plugins)
        finalizers = []
        try:
            # Any sys.module or sys.path changes done while running pytest
            # inline should be reverted after the test run completes to avoid
            # clashing with later inline tests run within the same pytest test,
            # e.g. just because they use matching test module names.
            finalizers.append(self.__take_sys_modules_snapshot().restore)
            finalizers.append(SysPathsSnapshot().restore)

            # Important note:
            # - our tests should not leave any other references/registrations
            #   laying around other than possibly loaded test modules
            #   referenced from sys.modules, as nothing will clean those up
            #   automatically

            rec = []

            class PytesterHelperPlugin:
                @staticmethod
                def pytest_configure(config: Config) -> None:
                    rec.append(self.make_hook_recorder(config.pluginmanager))

                    # The unraisable plugin GC collect slows down inline
                    # pytester runs too much.
                    config.stash[gc_collect_iterations_key] = 0

            plugins.append(PytesterHelperPlugin())
            ret = main([str(x) for x in args], plugins=plugins)
            # If pytest_configure never ran, no recorder was created; fall
            # back to a bare holder class exposing only the return code.
            if len(rec) == 1:
                reprec = rec.pop()
            else:

                class reprec:  # type: ignore
                    pass

            reprec.ret = ret

            # Typically we reraise keyboard interrupts from the child run
            # because it's our user requesting interruption of the testing.
            if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc:
                calls = reprec.getcalls("pytest_keyboard_interrupt")
                if calls and calls[-1].excinfo.type == KeyboardInterrupt:
                    raise KeyboardInterrupt()
            return reprec
        finally:
            for finalizer in finalizers:
                finalizer()
Run pytest.main() in-process, returning a HookRecorder.
Runs the pytest.main() function to run all of pytest inside
the test process itself. This means it can return a
HookRecorder instance which gives more detailed results
from that run than can be done by matching stdout/stderr from
runpytest().
Parameters
- args: Command line arguments to pass to pytest.main().
- plugins: Extra plugin instances the pytest.main() instance should use.
- no_reraise_ctrlc: Typically we reraise keyboard interrupts from the child run. If True, the KeyboardInterrupt exception is captured.
1164 def runpytest_inprocess( 1165 self, *args: str | os.PathLike[str], **kwargs: Any 1166 ) -> RunResult: 1167 """Return result of running pytest in-process, providing a similar 1168 interface to what self.runpytest() provides.""" 1169 syspathinsert = kwargs.pop("syspathinsert", False) 1170 1171 if syspathinsert: 1172 self.syspathinsert() 1173 instant = timing.Instant() 1174 capture = _get_multicapture("sys") 1175 capture.start_capturing() 1176 try: 1177 try: 1178 reprec = self.inline_run(*args, **kwargs) 1179 except SystemExit as e: 1180 ret = e.args[0] 1181 try: 1182 ret = ExitCode(e.args[0]) 1183 except ValueError: 1184 pass 1185 1186 class reprec: # type: ignore 1187 ret = ret 1188 1189 except Exception: 1190 traceback.print_exc() 1191 1192 class reprec: # type: ignore 1193 ret = ExitCode(3) 1194 1195 finally: 1196 out, err = capture.readouterr() 1197 capture.stop_capturing() 1198 sys.stdout.write(out) 1199 sys.stderr.write(err) 1200 1201 assert reprec.ret is not None 1202 res = RunResult( 1203 reprec.ret, out.splitlines(), err.splitlines(), instant.elapsed().seconds 1204 ) 1205 res.reprec = reprec # type: ignore 1206 return res
Return result of running pytest in-process, providing a similar interface to what self.runpytest() provides.
1208 def runpytest(self, *args: str | os.PathLike[str], **kwargs: Any) -> RunResult: 1209 """Run pytest inline or in a subprocess, depending on the command line 1210 option "--runpytest" and return a :py:class:`~pytest.RunResult`.""" 1211 new_args = self._ensure_basetemp(args) 1212 if self._method == "inprocess": 1213 return self.runpytest_inprocess(*new_args, **kwargs) 1214 elif self._method == "subprocess": 1215 return self.runpytest_subprocess(*new_args, **kwargs) 1216 raise RuntimeError(f"Unrecognized runpytest option: {self._method}")
Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a ~pytest.RunResult.
1231 def parseconfig(self, *args: str | os.PathLike[str]) -> Config: 1232 """Return a new pytest :class:`pytest.Config` instance from given 1233 commandline args. 1234 1235 This invokes the pytest bootstrapping code in _pytest.config to create a 1236 new :py:class:`pytest.PytestPluginManager` and call the 1237 :hook:`pytest_cmdline_parse` hook to create a new :class:`pytest.Config` 1238 instance. 1239 1240 If :attr:`plugins` has been populated they should be plugin modules 1241 to be registered with the plugin manager. 1242 """ 1243 import _pytest.config 1244 1245 new_args = [str(x) for x in self._ensure_basetemp(args)] 1246 1247 config = _pytest.config._prepareconfig(new_args, self.plugins) 1248 # we don't know what the test will do with this half-setup config 1249 # object and thus we make sure it gets unconfigured properly in any 1250 # case (otherwise capturing could still be active, for example) 1251 self._request.addfinalizer(config._ensure_unconfigure) 1252 return config
Return a new pytest pytest.Config instance from given
commandline args.
This invokes the pytest bootstrapping code in _pytest.config to create a
new pytest.PytestPluginManager and call the
:hook:pytest_cmdline_parse hook to create a new pytest.Config
instance.
If plugins has been populated they should be plugin modules
to be registered with the plugin manager.
1254 def parseconfigure(self, *args: str | os.PathLike[str]) -> Config: 1255 """Return a new pytest configured Config instance. 1256 1257 Returns a new :py:class:`pytest.Config` instance like 1258 :py:meth:`parseconfig`, but also calls the :hook:`pytest_configure` 1259 hook. 1260 """ 1261 config = self.parseconfig(*args) 1262 config._do_configure() 1263 return config
Return a new pytest configured Config instance.
Returns a new pytest.Config instance like
parseconfig(), but also calls the :hook:pytest_configure
hook.
1265 def getitem( 1266 self, source: str | os.PathLike[str], funcname: str = "test_func" 1267 ) -> Item: 1268 """Return the test item for a test function. 1269 1270 Writes the source to a python file and runs pytest's collection on 1271 the resulting module, returning the test item for the requested 1272 function name. 1273 1274 :param source: 1275 The module source. 1276 :param funcname: 1277 The name of the test function for which to return a test item. 1278 :returns: 1279 The test item. 1280 """ 1281 items = self.getitems(source) 1282 for item in items: 1283 if item.name == funcname: 1284 return item 1285 assert 0, f"{funcname!r} item not found in module:\n{source}\nitems: {items}"
Return the test item for a test function.
Writes the source to a python file and runs pytest's collection on the resulting module, returning the test item for the requested function name.
Parameters
- source: The module source.
- funcname: The name of the test function for which to return a test item.
Returns: The test item.
1287 def getitems(self, source: str | os.PathLike[str]) -> list[Item]: 1288 """Return all test items collected from the module. 1289 1290 Writes the source to a Python file and runs pytest's collection on 1291 the resulting module, returning all test items contained within. 1292 """ 1293 modcol = self.getmodulecol(source) 1294 return self.genitems([modcol])
Return all test items collected from the module.
Writes the source to a Python file and runs pytest's collection on the resulting module, returning all test items contained within.
1296 def getmodulecol( 1297 self, 1298 source: str | os.PathLike[str], 1299 configargs=(), 1300 *, 1301 withinit: bool = False, 1302 ): 1303 """Return the module collection node for ``source``. 1304 1305 Writes ``source`` to a file using :py:meth:`makepyfile` and then 1306 runs the pytest collection on it, returning the collection node for the 1307 test module. 1308 1309 :param source: 1310 The source code of the module to collect. 1311 1312 :param configargs: 1313 Any extra arguments to pass to :py:meth:`parseconfigure`. 1314 1315 :param withinit: 1316 Whether to also write an ``__init__.py`` file to the same 1317 directory to ensure it is a package. 1318 """ 1319 if isinstance(source, os.PathLike): 1320 path = self.path.joinpath(source) 1321 assert not withinit, "not supported for paths" 1322 else: 1323 kw = {self._name: str(source)} 1324 path = self.makepyfile(**kw) 1325 if withinit: 1326 self.makepyfile(__init__="#") 1327 self.config = config = self.parseconfigure(path, *configargs) 1328 return self.getnode(config, path)
Return the module collection node for source.
Writes source to a file using makepyfile() and then
runs the pytest collection on it, returning the collection node for the
test module.
Parameters
- source: The source code of the module to collect.
- configargs: Any extra arguments to pass to parseconfigure().
- withinit: Whether to also write an __init__.py file to the same directory to ensure it is a package.
1330 def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None: 1331 """Return the collection node for name from the module collection. 1332 1333 Searches a module collection node for a collection node matching the 1334 given name. 1335 1336 :param modcol: A module collection node; see :py:meth:`getmodulecol`. 1337 :param name: The name of the node to return. 1338 """ 1339 if modcol not in self._mod_collections: 1340 self._mod_collections[modcol] = list(modcol.collect()) 1341 for colitem in self._mod_collections[modcol]: 1342 if colitem.name == name: 1343 return colitem 1344 return None
Return the collection node for name from the module collection.
Searches a module collection node for a collection node matching the given name.
Parameters
- modcol: A module collection node; see getmodulecol().
- name: The name of the node to return.
1346 def popen( 1347 self, 1348 cmdargs: Sequence[str | os.PathLike[str]], 1349 stdout: int | TextIO = subprocess.PIPE, 1350 stderr: int | TextIO = subprocess.PIPE, 1351 stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, 1352 **kw, 1353 ): 1354 """Invoke :py:class:`subprocess.Popen`. 1355 1356 Calls :py:class:`subprocess.Popen` making sure the current working 1357 directory is in ``PYTHONPATH``. 1358 1359 You probably want to use :py:meth:`run` instead. 1360 """ 1361 env = os.environ.copy() 1362 env["PYTHONPATH"] = os.pathsep.join( 1363 filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) 1364 ) 1365 kw["env"] = env 1366 1367 if stdin is self.CLOSE_STDIN: 1368 kw["stdin"] = subprocess.PIPE 1369 elif isinstance(stdin, bytes): 1370 kw["stdin"] = subprocess.PIPE 1371 else: 1372 kw["stdin"] = stdin 1373 1374 popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) 1375 if stdin is self.CLOSE_STDIN: 1376 assert popen.stdin is not None 1377 popen.stdin.close() 1378 elif isinstance(stdin, bytes): 1379 assert popen.stdin is not None 1380 popen.stdin.write(stdin) 1381 1382 return popen
Invoke subprocess.Popen.
Calls subprocess.Popen making sure the current working
directory is in PYTHONPATH.
You probably want to use run() instead.
def run(
    self,
    *cmdargs: str | os.PathLike[str],
    timeout: float | None = None,
    stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN,
) -> RunResult:
    """Run a command with arguments.

    Run a process using :py:class:`subprocess.Popen` saving the stdout and
    stderr.

    :param cmdargs:
        The sequence of arguments to pass to :py:class:`subprocess.Popen`,
        with path-like objects being converted to :py:class:`str`
        automatically.
    :param timeout:
        The period in seconds after which to timeout and raise
        :py:class:`Pytester.TimeoutExpired`.
    :param stdin:
        Optional standard input.

        - If it is ``CLOSE_STDIN`` (Default), then this method calls
          :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and
          the standard input is closed immediately after the new command is
          started.

        - If it is of type :py:class:`bytes`, these bytes are sent to the
          standard input of the command.

        - Otherwise, it is passed through to :py:class:`subprocess.Popen`.
          For further information in this case, consult the document of the
          ``stdin`` parameter in :py:class:`subprocess.Popen`.
    :type stdin: _pytest.compat.NotSetType | bytes | IO[Any] | int
    :returns:
        The result.
    """
    __tracebackhide__ = True

    cmdargs = tuple(os.fspath(arg) for arg in cmdargs)
    p1 = self.path.joinpath("stdout")
    p2 = self.path.joinpath("stderr")
    print("running:", *cmdargs)
    print("     in:", Path.cwd())

    with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2:
        instant = timing.Instant()
        popen = self.popen(
            cmdargs,
            stdin=stdin,
            stdout=f1,
            stderr=f2,
        )
        if popen.stdin is not None:
            popen.stdin.close()

        def handle_timeout() -> None:
            __tracebackhide__ = True

            timeout_message = f"{timeout} second timeout expired running: {cmdargs}"

            popen.kill()
            popen.wait()
            raise self.TimeoutExpired(timeout_message)

        if timeout is None:
            ret = popen.wait()
        else:
            try:
                ret = popen.wait(timeout)
            except subprocess.TimeoutExpired:
                handle_timeout()
        f1.flush()
        f2.flush()

    with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2:
        out = f1.read().splitlines()
        err = f2.read().splitlines()

    self._dump_lines(out, sys.stdout)
    self._dump_lines(err, sys.stderr)

    # Map known pytest exit codes onto the ExitCode enum; leave unknown
    # process return codes as plain ints.
    with contextlib.suppress(ValueError):
        ret = ExitCode(ret)
    return RunResult(ret, out, err, instant.elapsed().seconds)
Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and
stderr.
Parameters
- cmdargs: The sequence of arguments to pass to subprocess.Popen, with path-like objects being converted to str automatically.
- timeout: The period in seconds after which to timeout and raise Pytester.TimeoutExpired.
- stdin: Optional standard input.
  - If it is CLOSE_STDIN (default), then this method calls subprocess.Popen with stdin=subprocess.PIPE, and the standard input is closed immediately after the new command is started.
  - If it is of type bytes, these bytes are sent to the standard input of the command.
  - Otherwise, it is passed through to subprocess.Popen. For further information in this case, consult the documentation of the stdin parameter in subprocess.Popen.
Returns: The result.
1480 def runpython(self, script: os.PathLike[str]) -> RunResult: 1481 """Run a python script using sys.executable as interpreter.""" 1482 return self.run(sys.executable, script)
Run a python script using sys.executable as interpreter.
1484 def runpython_c(self, command: str) -> RunResult: 1485 """Run ``python -c "command"``.""" 1486 return self.run(sys.executable, "-c", command)
Run python -c "command".
1488 def runpytest_subprocess( 1489 self, *args: str | os.PathLike[str], timeout: float | None = None 1490 ) -> RunResult: 1491 """Run pytest as a subprocess with given arguments. 1492 1493 Any plugins added to the :py:attr:`plugins` list will be added using the 1494 ``-p`` command line option. Additionally ``--basetemp`` is used to put 1495 any temporary files and directories in a numbered directory prefixed 1496 with "runpytest-" to not conflict with the normal numbered pytest 1497 location for temporary files and directories. 1498 1499 :param args: 1500 The sequence of arguments to pass to the pytest subprocess. 1501 :param timeout: 1502 The period in seconds after which to timeout and raise 1503 :py:class:`Pytester.TimeoutExpired`. 1504 :returns: 1505 The result. 1506 """ 1507 __tracebackhide__ = True 1508 p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) 1509 args = (f"--basetemp={p}", *args) 1510 for plugin in self.plugins: 1511 if not isinstance(plugin, str): 1512 raise ValueError( 1513 f"Specifying plugins as objects is not supported in pytester subprocess mode; " 1514 f"specify by name instead: {plugin}" 1515 ) 1516 args = ("-p", plugin, *args) 1517 args = self._getpytestargs() + args 1518 return self.run(*args, timeout=timeout)
Run pytest as a subprocess with given arguments.
Any plugins added to the plugins list will be added using the
-p command line option. Additionally --basetemp is used to put
any temporary files and directories in a numbered directory prefixed
with "runpytest-" to not conflict with the normal numbered pytest
location for temporary files and directories.
Parameters
- args: The sequence of arguments to pass to the pytest subprocess.
- timeout: The period in seconds after which to timeout and raise Pytester.TimeoutExpired.
Returns: The result.
1520 def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn: 1521 """Run pytest using pexpect. 1522 1523 This makes sure to use the right pytest and sets up the temporary 1524 directory locations. 1525 1526 The pexpect child is returned. 1527 """ 1528 basetemp = self.path / "temp-pexpect" 1529 basetemp.mkdir(mode=0o700) 1530 invoke = " ".join(map(str, self._getpytestargs())) 1531 cmd = f"{invoke} --basetemp={basetemp} {string}" 1532 return self.spawn(cmd, expect_timeout=expect_timeout)
Run pytest using pexpect.
This makes sure to use the right pytest and sets up the temporary directory locations.
The pexpect child is returned.
1534 def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn: 1535 """Run a command using pexpect. 1536 1537 The pexpect child is returned. 1538 """ 1539 pexpect = importorskip("pexpect", "3.0") 1540 if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): 1541 skip("pypy-64 bit not supported") 1542 if not hasattr(pexpect, "spawn"): 1543 skip("pexpect.spawn not available") 1544 logfile = self.path.joinpath("spawn.out").open("wb") 1545 1546 child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout) 1547 self._request.addfinalizer(logfile.close) 1548 return child
Run a command using pexpect.
The pexpect child is returned.
Common base class for all non-exit exceptions.
@final
class RaisesExc(AbstractRaises[BaseExcT_co_default]):
    """
    .. versionadded:: 8.4


    This is the class constructed when calling :func:`pytest.raises`, but may be used
    directly as a helper class with :class:`RaisesGroup` when you want to specify
    requirements on sub-exceptions.

    You don't need this if you only want to specify the type, since :class:`RaisesGroup`
    accepts ``type[BaseException]``.

    :param type[BaseException] | tuple[type[BaseException]] | None expected_exception:
        The expected type, or one of several possible types.
        May be ``None`` in order to only make use of ``match`` and/or ``check``

        The type is checked with :func:`isinstance`, and does not need to be an exact match.
        If that is wanted you can use the ``check`` parameter.

    :kwparam str | Pattern[str] match:
        A regex to match.

    :kwparam Callable[[BaseException], bool] check:
        If specified, a callable that will be called with the exception as a parameter
        after checking the type and the match regex if specified.
        If it returns ``True`` it will be considered a match, if not it will
        be considered a failed match.

    :meth:`RaisesExc.matches` can also be used standalone to check individual exceptions.

    Examples::

        with RaisesGroup(RaisesExc(ValueError, match="string"))
            ...
        with RaisesGroup(RaisesExc(check=lambda x: x.args == (3, "hello"))):
            ...
        with RaisesGroup(RaisesExc(check=lambda x: type(x) is ValueError)):
            ...
    """

    # Trio bundled hypothesis monkeypatching, we will probably instead assume that
    # hypothesis will handle that in their pytest plugin by the time this is released.
    # Alternatively we could add a version of get_pretty_function_description ourselves
    # https://github.com/HypothesisWorks/hypothesis/blob/8ced2f59f5c7bea3344e35d2d53e1f8f8eb9fcd8/hypothesis-python/src/hypothesis/internal/reflection.py#L439

    # At least one of the three parameters must be passed.
    @overload
    def __init__(
        self,
        expected_exception: (
            type[BaseExcT_co_default] | tuple[type[BaseExcT_co_default], ...]
        ),
        /,
        *,
        match: str | Pattern[str] | None = ...,
        check: Callable[[BaseExcT_co_default], bool] | None = ...,
    ) -> None: ...

    @overload
    def __init__(
        self: RaisesExc[BaseException],  # Give E a value.
        /,
        *,
        match: str | Pattern[str] | None,
        # If exception_type is not provided, check() must do any typechecks itself.
        check: Callable[[BaseException], bool] | None = ...,
    ) -> None: ...

    @overload
    def __init__(self, /, *, check: Callable[[BaseException], bool]) -> None: ...

    def __init__(
        self,
        expected_exception: (
            type[BaseExcT_co_default] | tuple[type[BaseExcT_co_default], ...] | None
        ) = None,
        /,
        *,
        match: str | Pattern[str] | None = None,
        check: Callable[[BaseExcT_co_default], bool] | None = None,
    ):
        super().__init__(match=match, check=check)
        # Normalize the positional argument to a tuple of types.
        if isinstance(expected_exception, tuple):
            expected_exceptions = expected_exception
        elif expected_exception is None:
            expected_exceptions = ()
        else:
            expected_exceptions = (expected_exception,)

        if (expected_exceptions == ()) and match is None and check is None:
            raise ValueError("You must specify at least one parameter to match on.")

        self.expected_exceptions = tuple(
            self._parse_exc(e, expected="a BaseException type")
            for e in expected_exceptions
        )

        # Set when a non-matching *type* should propagate instead of
        # producing an assertion about match/check.
        self._just_propagate = False

    def matches(
        self,
        exception: BaseException | None,
    ) -> TypeGuard[BaseExcT_co_default]:
        """Check if an exception matches the requirements of this :class:`RaisesExc`.
        If it fails, :attr:`RaisesExc.fail_reason` will be set.

        Examples::

            assert RaisesExc(ValueError).matches(my_exception):
            # is equivalent to
            assert isinstance(my_exception, ValueError)

            # this can be useful when checking e.g. the ``__cause__`` of an exception.
            with pytest.raises(ValueError) as excinfo:
                ...
            assert RaisesExc(SyntaxError, match="foo").matches(excinfo.value.__cause__)
            # above line is equivalent to
            assert isinstance(excinfo.value.__cause__, SyntaxError)
            assert re.search("foo", str(excinfo.value.__cause__)

        """
        self._just_propagate = False
        if exception is None:
            self._fail_reason = "exception is None"
            return False
        if not self._check_type(exception):
            self._just_propagate = True
            return False

        if not self._check_match(exception):
            return False

        return self._check_check(exception)

    def __repr__(self) -> str:
        parameters = []
        if self.expected_exceptions:
            parameters.append(_exception_type_name(self.expected_exceptions))
        if self.match is not None:
            # If no flags were specified, discard the redundant re.compile() here.
            parameters.append(
                f"match={_match_pattern(self.match)!r}",
            )
        if self.check is not None:
            parameters.append(f"check={repr_callable(self.check)}")
        return f"RaisesExc({', '.join(parameters)})"

    def _check_type(self, exception: BaseException) -> TypeGuard[BaseExcT_co_default]:
        # Delegates to the shared raw-type checker; a None fail reason
        # means the type matched.
        self._fail_reason = _check_raw_type(self.expected_exceptions, exception)
        return self._fail_reason is None

    def __enter__(self) -> ExceptionInfo[BaseExcT_co_default]:
        self.excinfo: ExceptionInfo[BaseExcT_co_default] = ExceptionInfo.for_later()
        return self.excinfo

    # TODO: move common code into superclass
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: types.TracebackType | None,
    ) -> bool:
        __tracebackhide__ = True
        if exc_type is None:
            if not self.expected_exceptions:
                fail("DID NOT RAISE any exception")
            if len(self.expected_exceptions) > 1:
                fail(f"DID NOT RAISE any of {self.expected_exceptions!r}")

            fail(f"DID NOT RAISE {self.expected_exceptions[0]!r}")

        assert self.excinfo is not None, (
            "Internal error - should have been constructed in __enter__"
        )

        if not self.matches(exc_val):
            if self._just_propagate:
                return False
            raise AssertionError(self._fail_reason)

        # Cast to narrow the exception type now that it's verified....
        # even though the TypeGuard in self.matches should be narrowing
        exc_info = cast(
            "tuple[type[BaseExcT_co_default], BaseExcT_co_default, types.TracebackType]",
            (exc_type, exc_val, exc_tb),
        )
        self.excinfo.fill_unfilled(exc_info)
        return True
New in version 8.4.
This is the class constructed when calling pytest.raises(), but may be used
directly as a helper class with RaisesGroup when you want to specify
requirements on sub-exceptions.
You don't need this if you only want to specify the type, since RaisesGroup
accepts type[BaseException].
Parameters
- expected_exception (type[BaseException] | tuple[type[BaseException]] | None): The expected type, or one of several possible types. May be None in order to only make use of match and/or check. The type is checked with isinstance(), and does not need to be an exact match; if that is wanted, you can use the check parameter.
:kwparam str | Pattern[str] match: A regex to match.
:kwparam Callable[[BaseException], bool] check:
If specified, a callable that will be called with the exception as a parameter
after checking the type and the match regex if specified.
If it returns True it will be considered a match, if not it will
be considered a failed match.
RaisesExc.matches() can also be used standalone to check individual exceptions.
Examples::
with RaisesGroup(RaisesExc(ValueError, match="string"))
...
with RaisesGroup(RaisesExc(check=lambda x: x.args == (3, "hello"))):
...
with RaisesGroup(RaisesExc(check=lambda x: type(x) is ValueError)):
...
613 def __init__( 614 self, 615 expected_exception: ( 616 type[BaseExcT_co_default] | tuple[type[BaseExcT_co_default], ...] | None 617 ) = None, 618 /, 619 *, 620 match: str | Pattern[str] | None = None, 621 check: Callable[[BaseExcT_co_default], bool] | None = None, 622 ): 623 super().__init__(match=match, check=check) 624 if isinstance(expected_exception, tuple): 625 expected_exceptions = expected_exception 626 elif expected_exception is None: 627 expected_exceptions = () 628 else: 629 expected_exceptions = (expected_exception,) 630 631 if (expected_exceptions == ()) and match is None and check is None: 632 raise ValueError("You must specify at least one parameter to match on.") 633 634 self.expected_exceptions = tuple( 635 self._parse_exc(e, expected="a BaseException type") 636 for e in expected_exceptions 637 ) 638 639 self._just_propagate = False
641 def matches( 642 self, 643 exception: BaseException | None, 644 ) -> TypeGuard[BaseExcT_co_default]: 645 """Check if an exception matches the requirements of this :class:`RaisesExc`. 646 If it fails, :attr:`RaisesExc.fail_reason` will be set. 647 648 Examples:: 649 650 assert RaisesExc(ValueError).matches(my_exception): 651 # is equivalent to 652 assert isinstance(my_exception, ValueError) 653 654 # this can be useful when checking e.g. the ``__cause__`` of an exception. 655 with pytest.raises(ValueError) as excinfo: 656 ... 657 assert RaisesExc(SyntaxError, match="foo").matches(excinfo.value.__cause__) 658 # above line is equivalent to 659 assert isinstance(excinfo.value.__cause__, SyntaxError) 660 assert re.search("foo", str(excinfo.value.__cause__) 661 662 """ 663 self._just_propagate = False 664 if exception is None: 665 self._fail_reason = "exception is None" 666 return False 667 if not self._check_type(exception): 668 self._just_propagate = True 669 return False 670 671 if not self._check_match(exception): 672 return False 673 674 return self._check_check(exception)
Check if an exception matches the requirements of this RaisesExc.
If it fails, RaisesExc.fail_reason will be set.
Examples::
assert RaisesExc(ValueError).matches(my_exception):
# is equivalent to
assert isinstance(my_exception, ValueError)
# this can be useful when checking e.g. the ``__cause__`` of an exception.
with pytest.raises(ValueError) as excinfo:
...
assert RaisesExc(SyntaxError, match="foo").matches(excinfo.value.__cause__)
# above line is equivalent to
assert isinstance(excinfo.value.__cause__, SyntaxError)
assert re.search("foo", str(excinfo.value.__cause__)
732@final 733class RaisesGroup(AbstractRaises[BaseExceptionGroup[BaseExcT_co]]): 734 """ 735 .. versionadded:: 8.4 736 737 Contextmanager for checking for an expected :exc:`ExceptionGroup`. 738 This works similar to :func:`pytest.raises`, but allows for specifying the structure of an :exc:`ExceptionGroup`. 739 :meth:`ExceptionInfo.group_contains` also tries to handle exception groups, 740 but it is very bad at checking that you *didn't* get unexpected exceptions. 741 742 The catching behaviour differs from :ref:`except* <except_star>`, being much 743 stricter about the structure by default. 744 By using ``allow_unwrapped=True`` and ``flatten_subgroups=True`` you can match 745 :ref:`except* <except_star>` fully when expecting a single exception. 746 747 :param args: 748 Any number of exception types, :class:`RaisesGroup` or :class:`RaisesExc` 749 to specify the exceptions contained in this exception. 750 All specified exceptions must be present in the raised group, *and no others*. 751 752 If you expect a variable number of exceptions you need to use 753 :func:`pytest.raises(ExceptionGroup) <pytest.raises>` and manually check 754 the contained exceptions. Consider making use of :meth:`RaisesExc.matches`. 755 756 It does not care about the order of the exceptions, so 757 ``RaisesGroup(ValueError, TypeError)`` 758 is equivalent to 759 ``RaisesGroup(TypeError, ValueError)``. 760 :kwparam str | re.Pattern[str] | None match: 761 If specified, a string containing a regular expression, 762 or a regular expression object, that is tested against the string 763 representation of the exception group and its :pep:`678` `__notes__` 764 using :func:`re.search`. 765 766 To match a literal string that may contain :ref:`special characters 767 <re-syntax>`, the pattern can first be escaped with :func:`re.escape`. 768 769 Note that " (5 subgroups)" will be stripped from the ``repr`` before matching. 
    :kwparam Callable[[E], bool] check:
        If specified, a callable that will be called with the group as a parameter
        after successfully matching the expected exceptions. If it returns ``True``
        it will be considered a match, if not it will be considered a failed match.
    :kwparam bool allow_unwrapped:
        If expecting a single exception or :class:`RaisesExc` it will match even
        if the exception is not inside an exceptiongroup.

        Using this together with ``match``, ``check`` or expecting multiple exceptions
        will raise an error.
    :kwparam bool flatten_subgroups:
        "flatten" any groups inside the raised exception group, extracting all exceptions
        inside any nested groups, before matching. Without this it expects you to
        fully specify the nesting structure by passing :class:`RaisesGroup` as expected
        parameter.

    Examples::

        with RaisesGroup(ValueError):
            raise ExceptionGroup("", (ValueError(),))
        # match
        with RaisesGroup(
            ValueError,
            ValueError,
            RaisesExc(TypeError, match="^expected int$"),
            match="^my group$",
        ):
            raise ExceptionGroup(
                "my group",
                [
                    ValueError(),
                    TypeError("expected int"),
                    ValueError(),
                ],
            )
        # check
        with RaisesGroup(
            KeyboardInterrupt,
            match="^hello$",
            check=lambda x: isinstance(x.__cause__, ValueError),
        ):
            raise BaseExceptionGroup("hello", [KeyboardInterrupt()]) from ValueError
        # nested groups
        with RaisesGroup(RaisesGroup(ValueError)):
            raise ExceptionGroup("", (ExceptionGroup("", (ValueError(),)),))

        # flatten_subgroups
        with RaisesGroup(ValueError, flatten_subgroups=True):
            raise ExceptionGroup("", (ExceptionGroup("", (ValueError(),)),))

        # allow_unwrapped
        with RaisesGroup(ValueError, allow_unwrapped=True):
            raise ValueError


    :meth:`RaisesGroup.matches` can also be used directly to check a standalone exception group.


    The matching algorithm is greedy, which means cases such as this may fail::

        with RaisesGroup(ValueError, RaisesExc(ValueError, match="hello")):
            raise ExceptionGroup("", (ValueError("hello"), ValueError("goodbye")))

    even though it generally does not care about the order of the exceptions in the group.
    To avoid the above you should specify the first :exc:`ValueError` with a :class:`RaisesExc` as well.

    .. note::
        When raised exceptions don't match the expected ones, you'll get a detailed error
        message explaining why. This includes ``repr(check)`` if set, which in Python can be
        overly verbose, showing memory locations etc etc.

        If installed and imported (in e.g. ``conftest.py``), the ``hypothesis`` library will
        monkeypatch this output to provide shorter & more readable repr's.
    """

    # The following overloads constrain which keyword combinations are legal and
    # simplify the inferred type parameter where possible.

    # allow_unwrapped=True requires: singular exception, exception not being
    # RaisesGroup instance, match is None, check is None
    @overload
    def __init__(
        self,
        expected_exception: type[BaseExcT_co] | RaisesExc[BaseExcT_co],
        /,
        *,
        allow_unwrapped: Literal[True],
        flatten_subgroups: bool = False,
    ) -> None: ...

    # flatten_subgroups = True also requires no nested RaisesGroup
    @overload
    def __init__(
        self,
        expected_exception: type[BaseExcT_co] | RaisesExc[BaseExcT_co],
        /,
        *other_exceptions: type[BaseExcT_co] | RaisesExc[BaseExcT_co],
        flatten_subgroups: Literal[True],
        match: str | Pattern[str] | None = None,
        check: Callable[[BaseExceptionGroup[BaseExcT_co]], bool] | None = None,
    ) -> None: ...

    # simplify the typevars if possible (the following 3 are equivalent but go simpler->complicated)
    # ... the first handles RaisesGroup[ValueError], the second RaisesGroup[ExceptionGroup[ValueError]],
    # the third RaisesGroup[ValueError | ExceptionGroup[ValueError]].
    # ... otherwise, we will get results like RaisesGroup[ValueError | ExceptionGroup[Never]] (I think)
    # (technically correct but misleading)
    @overload
    def __init__(
        self: RaisesGroup[ExcT_1],
        expected_exception: type[ExcT_1] | RaisesExc[ExcT_1],
        /,
        *other_exceptions: type[ExcT_1] | RaisesExc[ExcT_1],
        match: str | Pattern[str] | None = None,
        check: Callable[[ExceptionGroup[ExcT_1]], bool] | None = None,
    ) -> None: ...

    @overload
    def __init__(
        self: RaisesGroup[ExceptionGroup[ExcT_2]],
        expected_exception: RaisesGroup[ExcT_2],
        /,
        *other_exceptions: RaisesGroup[ExcT_2],
        match: str | Pattern[str] | None = None,
        check: Callable[[ExceptionGroup[ExceptionGroup[ExcT_2]]], bool] | None = None,
    ) -> None: ...

    @overload
    def __init__(
        self: RaisesGroup[ExcT_1 | ExceptionGroup[ExcT_2]],
        expected_exception: type[ExcT_1] | RaisesExc[ExcT_1] | RaisesGroup[ExcT_2],
        /,
        *other_exceptions: type[ExcT_1] | RaisesExc[ExcT_1] | RaisesGroup[ExcT_2],
        match: str | Pattern[str] | None = None,
        check: (
            Callable[[ExceptionGroup[ExcT_1 | ExceptionGroup[ExcT_2]]], bool] | None
        ) = None,
    ) -> None: ...

    # same as the above 3 but handling BaseException
    @overload
    def __init__(
        self: RaisesGroup[BaseExcT_1],
        expected_exception: type[BaseExcT_1] | RaisesExc[BaseExcT_1],
        /,
        *other_exceptions: type[BaseExcT_1] | RaisesExc[BaseExcT_1],
        match: str | Pattern[str] | None = None,
        check: Callable[[BaseExceptionGroup[BaseExcT_1]], bool] | None = None,
    ) -> None: ...

    @overload
    def __init__(
        self: RaisesGroup[BaseExceptionGroup[BaseExcT_2]],
        expected_exception: RaisesGroup[BaseExcT_2],
        /,
        *other_exceptions: RaisesGroup[BaseExcT_2],
        match: str | Pattern[str] | None = None,
        check: (
            Callable[[BaseExceptionGroup[BaseExceptionGroup[BaseExcT_2]]], bool] | None
        ) = None,
    ) -> None: ...
    @overload
    def __init__(
        self: RaisesGroup[BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]],
        expected_exception: type[BaseExcT_1]
        | RaisesExc[BaseExcT_1]
        | RaisesGroup[BaseExcT_2],
        /,
        *other_exceptions: type[BaseExcT_1]
        | RaisesExc[BaseExcT_1]
        | RaisesGroup[BaseExcT_2],
        match: str | Pattern[str] | None = None,
        check: (
            Callable[
                [BaseExceptionGroup[BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]]],
                bool,
            ]
            | None
        ) = None,
    ) -> None: ...

    # Runtime implementation backing all of the overloads above.
    def __init__(
        self: RaisesGroup[ExcT_1 | BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]],
        expected_exception: type[BaseExcT_1]
        | RaisesExc[BaseExcT_1]
        | RaisesGroup[BaseExcT_2],
        /,
        *other_exceptions: type[BaseExcT_1]
        | RaisesExc[BaseExcT_1]
        | RaisesGroup[BaseExcT_2],
        allow_unwrapped: bool = False,
        flatten_subgroups: bool = False,
        match: str | Pattern[str] | None = None,
        check: (
            Callable[[BaseExceptionGroup[BaseExcT_1]], bool]
            | Callable[[ExceptionGroup[ExcT_1]], bool]
            | None
        ) = None,
    ):
        # The type hint on the `self` and `check` parameters uses different formats
        # that are *very* hard to reconcile while adhering to the overloads, so we cast
        # it to avoid an error when passing it to super().__init__
        check = cast(
            "Callable[[BaseExceptionGroup[ExcT_1|BaseExcT_1|BaseExceptionGroup[BaseExcT_2]]], bool]",
            check,
        )
        super().__init__(match=match, check=check)
        self.allow_unwrapped = allow_unwrapped
        self.flatten_subgroups: bool = flatten_subgroups
        # Flipped to True by _parse_excgroup if any expected exception is a
        # BaseException-only type/matcher; used by expected_type().
        self.is_baseexception = False

        # Reject keyword combinations that the overloads already forbid statically,
        # with a helpful runtime message for untyped callers.
        if allow_unwrapped and other_exceptions:
            raise ValueError(
                "You cannot specify multiple exceptions with `allow_unwrapped=True.`"
                " If you want to match one of multiple possible exceptions you should"
                " use a `RaisesExc`."
                " E.g. `RaisesExc(check=lambda e: isinstance(e, (...)))`",
            )
        if allow_unwrapped and isinstance(expected_exception, RaisesGroup):
            raise ValueError(
                "`allow_unwrapped=True` has no effect when expecting a `RaisesGroup`."
                " You might want it in the expected `RaisesGroup`, or"
                " `flatten_subgroups=True` if you don't care about the structure.",
            )
        if allow_unwrapped and (match is not None or check is not None):
            raise ValueError(
                "`allow_unwrapped=True` bypasses the `match` and `check` parameters"
                " if the exception is unwrapped. If you intended to match/check the"
                " exception you should use a `RaisesExc` object. If you want to match/check"
                " the exceptiongroup when the exception *is* wrapped you need to"
                " do e.g. `if isinstance(exc.value, ExceptionGroup):"
                " assert RaisesGroup(...).matches(exc.value)` afterwards.",
            )

        # Validate and normalize each expected entry (type / RaisesExc / RaisesGroup).
        self.expected_exceptions: tuple[
            type[BaseExcT_co] | RaisesExc[BaseExcT_co] | RaisesGroup[BaseException], ...
        ] = tuple(
            self._parse_excgroup(e, "a BaseException type, RaisesExc, or RaisesGroup")
            for e in (
                expected_exception,
                *other_exceptions,
            )
        )

    def _parse_excgroup(
        self,
        exc: (
            type[BaseExcT_co]
            | types.GenericAlias
            | RaisesExc[BaseExcT_1]
            | RaisesGroup[BaseExcT_2]
        ),
        expected: str,
    ) -> type[BaseExcT_co] | RaisesExc[BaseExcT_1] | RaisesGroup[BaseExcT_2]:
        """Validate one expected entry, marking nested matchers and updating
        `self.is_baseexception`; raw types are delegated to the parent's `_parse_exc`."""
        # verify exception type and set `self.is_baseexception`
        if isinstance(exc, RaisesGroup):
            if self.flatten_subgroups:
                raise ValueError(
                    "You cannot specify a nested structure inside a RaisesGroup with"
                    " `flatten_subgroups=True`. The parameter will flatten subgroups"
                    " in the raised exceptiongroup before matching, which would never"
                    " match a nested structure.",
                )
            self.is_baseexception |= exc.is_baseexception
            exc._nested = True
            return exc
        elif isinstance(exc, RaisesExc):
            self.is_baseexception |= exc.is_baseexception
            exc._nested = True
            return exc
        elif isinstance(exc, tuple):
            raise TypeError(
                f"Expected {expected}, but got {type(exc).__name__!r}.\n"
                "RaisesGroup does not support tuples of exception types when expecting one of "
                "several possible exception types like RaisesExc.\n"
                "If you meant to expect a group with multiple exceptions, list them as separate arguments."
            )
        else:
            return super()._parse_exc(exc, expected)

    @overload
    def __enter__(
        self: RaisesGroup[ExcT_1],
    ) -> ExceptionInfo[ExceptionGroup[ExcT_1]]: ...
    @overload
    def __enter__(
        self: RaisesGroup[BaseExcT_1],
    ) -> ExceptionInfo[BaseExceptionGroup[BaseExcT_1]]: ...

    def __enter__(self) -> ExceptionInfo[BaseExceptionGroup[BaseException]]:
        # The ExceptionInfo is filled in by __exit__ once the group has matched.
        self.excinfo: ExceptionInfo[BaseExceptionGroup[BaseExcT_co]] = (
            ExceptionInfo.for_later()
        )
        return self.excinfo

    def __repr__(self) -> str:
        reqs = [
            e.__name__ if isinstance(e, type) else repr(e)
            for e in self.expected_exceptions
        ]
        if self.allow_unwrapped:
            reqs.append(f"allow_unwrapped={self.allow_unwrapped}")
        if self.flatten_subgroups:
            reqs.append(f"flatten_subgroups={self.flatten_subgroups}")
        if self.match is not None:
            # If no flags were specified, discard the redundant re.compile() here.
            reqs.append(f"match={_match_pattern(self.match)!r}")
        if self.check is not None:
            reqs.append(f"check={repr_callable(self.check)}")
        return f"RaisesGroup({', '.join(reqs)})"

    def _unroll_exceptions(
        self,
        exceptions: Sequence[BaseException],
    ) -> Sequence[BaseException]:
        """Used if `flatten_subgroups=True`."""
        res: list[BaseException] = []
        for exc in exceptions:
            if isinstance(exc, BaseExceptionGroup):
                # recursively flatten nested groups into a single flat sequence
                res.extend(self._unroll_exceptions(exc.exceptions))

            else:
                res.append(exc)
        return res

    @overload
    def matches(
        self: RaisesGroup[ExcT_1],
        exception: BaseException | None,
    ) -> TypeGuard[ExceptionGroup[ExcT_1]]: ...
    @overload
    def matches(
        self: RaisesGroup[BaseExcT_1],
        exception: BaseException | None,
    ) -> TypeGuard[BaseExceptionGroup[BaseExcT_1]]: ...

    def matches(
        self,
        exception: BaseException | None,
    ) -> bool:
        """Check if an exception matches the requirements of this RaisesGroup.
        If it fails, `RaisesGroup.fail_reason` will be set.

        Example::

            with pytest.raises(TypeError) as excinfo:
                ...
            assert RaisesGroup(ValueError).matches(excinfo.value.__cause__)
            # the above line is equivalent to
            myexc = excinfo.value.__cause__
            assert isinstance(myexc, BaseExceptionGroup)
            assert len(myexc.exceptions) == 1
            assert isinstance(myexc.exceptions[0], ValueError)
        """
        self._fail_reason = None
        if exception is None:
            self._fail_reason = "exception is None"
            return False
        if not isinstance(exception, BaseExceptionGroup):
            # we opt to only print type of the exception here, as the repr would
            # likely be quite long
            not_group_msg = f"`{type(exception).__name__}()` is not an exception group"
            if len(self.expected_exceptions) > 1:
                self._fail_reason = not_group_msg
                return False
            # if we have 1 expected exception, check if it would work even if
            # allow_unwrapped is not set
            res = self._check_expected(self.expected_exceptions[0], exception)
            if res is None and self.allow_unwrapped:
                return True

            if res is None:
                self._fail_reason = (
                    f"{not_group_msg}, but would match with `allow_unwrapped=True`"
                )
            elif self.allow_unwrapped:
                self._fail_reason = res
            else:
                self._fail_reason = not_group_msg
            return False

        actual_exceptions: Sequence[BaseException] = exception.exceptions
        if self.flatten_subgroups:
            actual_exceptions = self._unroll_exceptions(actual_exceptions)

        if not self._check_match(exception):
            self._fail_reason = cast(str, self._fail_reason)
            old_reason = self._fail_reason
            # If the single contained exception would have satisfied `match`, suggest
            # moving the pattern into a nested RaisesExc.
            if (
                len(actual_exceptions) == len(self.expected_exceptions) == 1
                and isinstance(expected := self.expected_exceptions[0], type)
                and isinstance(actual := actual_exceptions[0], expected)
                and self._check_match(actual)
            ):
                assert self.match is not None, "can't be None if _check_match failed"
                assert self._fail_reason is old_reason is not None
                self._fail_reason += (
                    f"\n"
                    f" but matched the expected `{self._repr_expected(expected)}`.\n"
                    f" You might want "
                    f"`RaisesGroup(RaisesExc({expected.__name__}, match={_match_pattern(self.match)!r}))`"
                )
            else:
                self._fail_reason = old_reason
            return False

        # do the full check on expected exceptions
        if not self._check_exceptions(
            exception,
            actual_exceptions,
        ):
            self._fail_reason = cast(str, self._fail_reason)
            assert self._fail_reason is not None
            old_reason = self._fail_reason
            # if we're not expecting a nested structure, and there is one, do a second
            # pass where we try flattening it
            if (
                not self.flatten_subgroups
                and not any(
                    isinstance(e, RaisesGroup) for e in self.expected_exceptions
                )
                and any(isinstance(e, BaseExceptionGroup) for e in actual_exceptions)
                and self._check_exceptions(
                    exception,
                    self._unroll_exceptions(exception.exceptions),
                )
            ):
                # only indent if it's a single-line reason. In a multi-line there's already
                # indented lines that this does not belong to.
                indent = " " if "\n" not in self._fail_reason else ""
                self._fail_reason = (
                    old_reason
                    + f"\n{indent}Did you mean to use `flatten_subgroups=True`?"
                )
            else:
                self._fail_reason = old_reason
            return False

        # Only run `self.check` once we know `exception` is of the correct type.
        if not self._check_check(exception):
            reason = (
                cast(str, self._fail_reason) + f" on the {type(exception).__name__}"
            )
            # If `check` would have passed on the single contained exception, suggest
            # moving it into a nested RaisesExc.
            if (
                len(actual_exceptions) == len(self.expected_exceptions) == 1
                and isinstance(expected := self.expected_exceptions[0], type)
                # we explicitly break typing here :)
                and self._check_check(actual_exceptions[0])  # type: ignore[arg-type]
            ):
                self._fail_reason = reason + (
                    f", but did return True for the expected {self._repr_expected(expected)}."
                    f" You might want RaisesGroup(RaisesExc({expected.__name__}, check=<...>))"
                )
            else:
                self._fail_reason = reason
            return False

        return True

    @staticmethod
    def _check_expected(
        expected_type: (
            type[BaseException] | RaisesExc[BaseException] | RaisesGroup[BaseException]
        ),
        exception: BaseException,
    ) -> str | None:
        """Helper method for `RaisesGroup.matches` and `RaisesGroup._check_exceptions`
        to check one of potentially several expected exceptions.

        Returns None on a successful match, otherwise a failure-reason string."""
        if isinstance(expected_type, type):
            return _check_raw_type(expected_type, exception)
        res = expected_type.matches(exception)
        if res:
            return None
        assert expected_type.fail_reason is not None
        if expected_type.fail_reason.startswith("\n"):
            return f"\n{expected_type!r}: {indent(expected_type.fail_reason, ' ')}"
        return f"{expected_type!r}: {expected_type.fail_reason}"

    @staticmethod
    def _repr_expected(e: type[BaseException] | AbstractRaises[BaseException]) -> str:
        """Get the repr of an expected type/RaisesExc/RaisesGroup, but we only want
        the name if it's a type"""
        if isinstance(e, type):
            return _exception_type_name(e)
        return repr(e)

    @overload
    def _check_exceptions(
        self: RaisesGroup[ExcT_1],
        _exception: Exception,
        actual_exceptions: Sequence[Exception],
    ) -> TypeGuard[ExceptionGroup[ExcT_1]]: ...
    @overload
    def _check_exceptions(
        self: RaisesGroup[BaseExcT_1],
        _exception: BaseException,
        actual_exceptions: Sequence[BaseException],
    ) -> TypeGuard[BaseExceptionGroup[BaseExcT_1]]: ...
1266 1267 def _check_exceptions( 1268 self, 1269 _exception: BaseException, 1270 actual_exceptions: Sequence[BaseException], 1271 ) -> bool: 1272 """Helper method for RaisesGroup.matches that attempts to pair up expected and actual exceptions""" 1273 # The _exception parameter is not used, but necessary for the TypeGuard 1274 1275 # full table with all results 1276 results = ResultHolder(self.expected_exceptions, actual_exceptions) 1277 1278 # (indexes of) raised exceptions that haven't (yet) found an expected 1279 remaining_actual = list(range(len(actual_exceptions))) 1280 # (indexes of) expected exceptions that haven't found a matching raised 1281 failed_expected: list[int] = [] 1282 # successful greedy matches 1283 matches: dict[int, int] = {} 1284 1285 # loop over expected exceptions first to get a more predictable result 1286 for i_exp, expected in enumerate(self.expected_exceptions): 1287 for i_rem in remaining_actual: 1288 res = self._check_expected(expected, actual_exceptions[i_rem]) 1289 results.set_result(i_exp, i_rem, res) 1290 if res is None: 1291 remaining_actual.remove(i_rem) 1292 matches[i_exp] = i_rem 1293 break 1294 else: 1295 failed_expected.append(i_exp) 1296 1297 # All exceptions matched up successfully 1298 if not remaining_actual and not failed_expected: 1299 return True 1300 1301 # in case of a single expected and single raised we simplify the output 1302 if 1 == len(actual_exceptions) == len(self.expected_exceptions): 1303 assert not matches 1304 self._fail_reason = res 1305 return False 1306 1307 # The test case is failing, so we can do a slow and exhaustive check to find 1308 # duplicate matches etc that will be helpful in debugging 1309 for i_exp, expected in enumerate(self.expected_exceptions): 1310 for i_actual, actual in enumerate(actual_exceptions): 1311 if results.has_result(i_exp, i_actual): 1312 continue 1313 results.set_result( 1314 i_exp, i_actual, self._check_expected(expected, actual) 1315 ) 1316 1317 successful_str = ( 1318 
f"{len(matches)} matched exception{'s' if len(matches) > 1 else ''}. " 1319 if matches 1320 else "" 1321 ) 1322 1323 # all expected were found 1324 if not failed_expected and results.no_match_for_actual(remaining_actual): 1325 self._fail_reason = ( 1326 f"{successful_str}Unexpected exception(s):" 1327 f" {[actual_exceptions[i] for i in remaining_actual]!r}" 1328 ) 1329 return False 1330 # all raised exceptions were expected 1331 if not remaining_actual and results.no_match_for_expected(failed_expected): 1332 no_match_for_str = ", ".join( 1333 self._repr_expected(self.expected_exceptions[i]) 1334 for i in failed_expected 1335 ) 1336 self._fail_reason = f"{successful_str}Too few exceptions raised, found no match for: [{no_match_for_str}]" 1337 return False 1338 1339 # if there's only one remaining and one failed, and the unmatched didn't match anything else, 1340 # we elect to only print why the remaining and the failed didn't match. 1341 if ( 1342 1 == len(remaining_actual) == len(failed_expected) 1343 and results.no_match_for_actual(remaining_actual) 1344 and results.no_match_for_expected(failed_expected) 1345 ): 1346 self._fail_reason = f"{successful_str}{results.get_result(failed_expected[0], remaining_actual[0])}" 1347 return False 1348 1349 # there's both expected and raised exceptions without matches 1350 s = "" 1351 if matches: 1352 s += f"\n{successful_str}" 1353 indent_1 = " " * 2 1354 indent_2 = " " * 4 1355 1356 if not remaining_actual: 1357 s += "\nToo few exceptions raised!" 1358 elif not failed_expected: 1359 s += "\nUnexpected exception(s)!" 
1360 1361 if failed_expected: 1362 s += "\nThe following expected exceptions did not find a match:" 1363 rev_matches = {v: k for k, v in matches.items()} 1364 for i_failed in failed_expected: 1365 s += ( 1366 f"\n{indent_1}{self._repr_expected(self.expected_exceptions[i_failed])}" 1367 ) 1368 for i_actual, actual in enumerate(actual_exceptions): 1369 if results.get_result(i_exp, i_actual) is None: 1370 # we print full repr of match target 1371 s += ( 1372 f"\n{indent_2}It matches {backquote(repr(actual))} which was paired with " 1373 + backquote( 1374 self._repr_expected( 1375 self.expected_exceptions[rev_matches[i_actual]] 1376 ) 1377 ) 1378 ) 1379 1380 if remaining_actual: 1381 s += "\nThe following raised exceptions did not find a match" 1382 for i_actual in remaining_actual: 1383 s += f"\n{indent_1}{actual_exceptions[i_actual]!r}:" 1384 for i_exp, expected in enumerate(self.expected_exceptions): 1385 res = results.get_result(i_exp, i_actual) 1386 if i_exp in failed_expected: 1387 assert res is not None 1388 if res[0] != "\n": 1389 s += "\n" 1390 s += indent(res, indent_2) 1391 if res is None: 1392 # we print full repr of match target 1393 s += ( 1394 f"\n{indent_2}It matches {backquote(self._repr_expected(expected))} " 1395 f"which was paired with {backquote(repr(actual_exceptions[matches[i_exp]]))}" 1396 ) 1397 1398 if len(self.expected_exceptions) == len(actual_exceptions) and possible_match( 1399 results 1400 ): 1401 s += ( 1402 "\nThere exist a possible match when attempting an exhaustive check," 1403 " but RaisesGroup uses a greedy algorithm. " 1404 "Please make your expected exceptions more stringent with `RaisesExc` etc" 1405 " so the greedy algorithm can function." 
1406 ) 1407 self._fail_reason = s 1408 return False 1409 1410 def __exit__( 1411 self, 1412 exc_type: type[BaseException] | None, 1413 exc_val: BaseException | None, 1414 exc_tb: types.TracebackType | None, 1415 ) -> bool: 1416 __tracebackhide__ = True 1417 if exc_type is None: 1418 fail(f"DID NOT RAISE any exception, expected `{self.expected_type()}`") 1419 1420 assert self.excinfo is not None, ( 1421 "Internal error - should have been constructed in __enter__" 1422 ) 1423 1424 # group_str is the only thing that differs between RaisesExc and RaisesGroup... 1425 # I might just scrap it? Or make it part of fail_reason 1426 group_str = ( 1427 "(group)" 1428 if self.allow_unwrapped and not issubclass(exc_type, BaseExceptionGroup) 1429 else "group" 1430 ) 1431 1432 if not self.matches(exc_val): 1433 fail(f"Raised exception {group_str} did not match: {self._fail_reason}") 1434 1435 # Cast to narrow the exception type now that it's verified.... 1436 # even though the TypeGuard in self.matches should be narrowing 1437 exc_info = cast( 1438 "tuple[type[BaseExceptionGroup[BaseExcT_co]], BaseExceptionGroup[BaseExcT_co], types.TracebackType]", 1439 (exc_type, exc_val, exc_tb), 1440 ) 1441 self.excinfo.fill_unfilled(exc_info) 1442 return True 1443 1444 def expected_type(self) -> str: 1445 subexcs = [] 1446 for e in self.expected_exceptions: 1447 if isinstance(e, RaisesExc): 1448 subexcs.append(repr(e)) 1449 elif isinstance(e, RaisesGroup): 1450 subexcs.append(e.expected_type()) 1451 elif isinstance(e, type): 1452 subexcs.append(e.__name__) 1453 else: # pragma: no cover 1454 raise AssertionError("unknown type") 1455 group_type = "Base" if self.is_baseexception else "" 1456 return f"{group_type}ExceptionGroup({', '.join(subexcs)})"
New in version 8.4.
Contextmanager for checking for an expected ExceptionGroup.
This works similar to pytest.raises(), but allows for specifying the structure of an ExceptionGroup.
ExceptionInfo.group_contains() also tries to handle exception groups,
but it is very bad at checking that you didn't get unexpected exceptions.
The catching behaviour differs from ``except*``, being much
stricter about the structure by default.
By using ``allow_unwrapped=True`` and ``flatten_subgroups=True`` you can match
``except*`` fully when expecting a single exception.
Parameters
args: Any number of exception types,
``RaisesGroup`` or ``RaisesExc`` to specify the exceptions contained in this exception. All specified exceptions must be present in the raised group, and no others. If you expect a variable number of exceptions you need to use
``pytest.raises(ExceptionGroup)`` and manually check the contained exceptions. Consider making use of ``RaisesExc.matches()``. It does not care about the order of the exceptions, so
``RaisesGroup(ValueError, TypeError)`` is equivalent to ``RaisesGroup(TypeError, ValueError)``. :kwparam str | re.Pattern[str] | None match: If specified, a string containing a regular expression, or a regular expression object, that is tested against the string representation of the exception group and its :pep:`678` ``__notes__`` using ``re.search()``. To match a literal string that may contain special characters, the pattern can first be escaped with ``re.escape()``. Note that " (5 subgroups)" will be stripped from the ``repr`` before matching. :kwparam Callable[[E], bool] check: If specified, a callable that will be called with the group as a parameter after successfully matching the expected exceptions. If it returns ``True`` it will be considered a match, if not it will be considered a failed match. :kwparam bool allow_unwrapped: If expecting a single exception or ``RaisesExc`` it will match even if the exception is not inside an exceptiongroup. Using this together with ``match``, ``check`` or expecting multiple exceptions will raise an error. :kwparam bool flatten_subgroups: "flatten" any groups inside the raised exception group, extracting all exceptions inside any nested groups, before matching. Without this it expects you to fully specify the nesting structure by passing ``RaisesGroup`` as expected parameter.
Examples::
with RaisesGroup(ValueError):
raise ExceptionGroup("", (ValueError(),))
# match
with RaisesGroup(
ValueError,
ValueError,
RaisesExc(TypeError, match="^expected int$"),
match="^my group$",
):
raise ExceptionGroup(
"my group",
[
ValueError(),
TypeError("expected int"),
ValueError(),
],
)
# check
with RaisesGroup(
KeyboardInterrupt,
match="^hello$",
check=lambda x: isinstance(x.__cause__, ValueError),
):
raise BaseExceptionGroup("hello", [KeyboardInterrupt()]) from ValueError
# nested groups
with RaisesGroup(RaisesGroup(ValueError)):
raise ExceptionGroup("", (ExceptionGroup("", (ValueError(),)),))
# flatten_subgroups
with RaisesGroup(ValueError, flatten_subgroups=True):
raise ExceptionGroup("", (ExceptionGroup("", (ValueError(),)),))
# allow_unwrapped
with RaisesGroup(ValueError, allow_unwrapped=True):
raise ValueError
RaisesGroup.matches() can also be used directly to check a standalone exception group.
The matching algorithm is greedy, which means cases such as this may fail::
with RaisesGroup(ValueError, RaisesExc(ValueError, match="hello")):
raise ExceptionGroup("", (ValueError("hello"), ValueError("goodbye")))
even though it generally does not care about the order of the exceptions in the group.
To avoid the above you should specify the first ValueError with a RaisesExc as well.
When raised exceptions don't match the expected ones, you'll get a detailed error
message explaining why. This includes repr(check) if set, which in Python can be
overly verbose, showing memory locations etc etc.
If installed and imported (in e.g. conftest.py), the hypothesis library will
monkeypatch this output to provide shorter & more readable repr's.
    # Runtime implementation backing the typed overloads above.
    def __init__(
        self: RaisesGroup[ExcT_1 | BaseExcT_1 | BaseExceptionGroup[BaseExcT_2]],
        expected_exception: type[BaseExcT_1]
        | RaisesExc[BaseExcT_1]
        | RaisesGroup[BaseExcT_2],
        /,
        *other_exceptions: type[BaseExcT_1]
        | RaisesExc[BaseExcT_1]
        | RaisesGroup[BaseExcT_2],
        allow_unwrapped: bool = False,
        flatten_subgroups: bool = False,
        match: str | Pattern[str] | None = None,
        check: (
            Callable[[BaseExceptionGroup[BaseExcT_1]], bool]
            | Callable[[ExceptionGroup[ExcT_1]], bool]
            | None
        ) = None,
    ):
        # The type hint on the `self` and `check` parameters uses different formats
        # that are *very* hard to reconcile while adhering to the overloads, so we cast
        # it to avoid an error when passing it to super().__init__
        check = cast(
            "Callable[[BaseExceptionGroup[ExcT_1|BaseExcT_1|BaseExceptionGroup[BaseExcT_2]]], bool]",
            check,
        )
        super().__init__(match=match, check=check)
        self.allow_unwrapped = allow_unwrapped
        self.flatten_subgroups: bool = flatten_subgroups
        self.is_baseexception = False

        # Reject keyword combinations that the overloads already forbid statically.
        if allow_unwrapped and other_exceptions:
            raise ValueError(
                "You cannot specify multiple exceptions with `allow_unwrapped=True.`"
                " If you want to match one of multiple possible exceptions you should"
                " use a `RaisesExc`."
                " E.g. `RaisesExc(check=lambda e: isinstance(e, (...)))`",
            )
        if allow_unwrapped and isinstance(expected_exception, RaisesGroup):
            raise ValueError(
                "`allow_unwrapped=True` has no effect when expecting a `RaisesGroup`."
                " You might want it in the expected `RaisesGroup`, or"
                " `flatten_subgroups=True` if you don't care about the structure.",
            )
        if allow_unwrapped and (match is not None or check is not None):
            raise ValueError(
                "`allow_unwrapped=True` bypasses the `match` and `check` parameters"
                " if the exception is unwrapped. If you intended to match/check the"
                " exception you should use a `RaisesExc` object. If you want to match/check"
                " the exceptiongroup when the exception *is* wrapped you need to"
                " do e.g. `if isinstance(exc.value, ExceptionGroup):"
                " assert RaisesGroup(...).matches(exc.value)` afterwards.",
            )

        # Validate and normalize each expected entry (type / RaisesExc / RaisesGroup).
        self.expected_exceptions: tuple[
            type[BaseExcT_co] | RaisesExc[BaseExcT_co] | RaisesGroup[BaseException], ...
        ] = tuple(
            self._parse_excgroup(e, "a BaseException type, RaisesExc, or RaisesGroup")
            for e in (
                expected_exception,
                *other_exceptions,
            )
        )
    def matches(
        self,
        exception: BaseException | None,
    ) -> bool:
        """Check if an exception matches the requirements of this RaisesGroup.
        If it fails, `RaisesGroup.fail_reason` will be set.

        Example::

            with pytest.raises(TypeError) as excinfo:
                ...
            assert RaisesGroup(ValueError).matches(excinfo.value.__cause__)
            # the above line is equivalent to
            myexc = excinfo.value.__cause__
            assert isinstance(myexc, BaseExceptionGroup)
            assert len(myexc.exceptions) == 1
            assert isinstance(myexc.exceptions[0], ValueError)
        """
        self._fail_reason = None
        if exception is None:
            self._fail_reason = "exception is None"
            return False
        if not isinstance(exception, BaseExceptionGroup):
            # we opt to only print type of the exception here, as the repr would
            # likely be quite long
            not_group_msg = f"`{type(exception).__name__}()` is not an exception group"
            if len(self.expected_exceptions) > 1:
                self._fail_reason = not_group_msg
                return False
            # if we have 1 expected exception, check if it would work even if
            # allow_unwrapped is not set
            res = self._check_expected(self.expected_exceptions[0], exception)
            if res is None and self.allow_unwrapped:
                return True

            if res is None:
                self._fail_reason = (
                    f"{not_group_msg}, but would match with `allow_unwrapped=True`"
                )
            elif self.allow_unwrapped:
                self._fail_reason = res
            else:
                self._fail_reason = not_group_msg
            return False

        actual_exceptions: Sequence[BaseException] = exception.exceptions
        if self.flatten_subgroups:
            actual_exceptions = self._unroll_exceptions(actual_exceptions)

        if not self._check_match(exception):
            self._fail_reason = cast(str, self._fail_reason)
            old_reason = self._fail_reason
            # If the single contained exception would have satisfied `match`, suggest
            # moving the pattern into a nested RaisesExc.
            if (
                len(actual_exceptions) == len(self.expected_exceptions) == 1
                and isinstance(expected := self.expected_exceptions[0], type)
                and isinstance(actual := actual_exceptions[0], expected)
                and self._check_match(actual)
            ):
                assert self.match is not None, "can't be None if _check_match failed"
                assert self._fail_reason is old_reason is not None
                self._fail_reason += (
                    f"\n"
                    f" but matched the expected `{self._repr_expected(expected)}`.\n"
                    f" You might want "
                    f"`RaisesGroup(RaisesExc({expected.__name__}, match={_match_pattern(self.match)!r}))`"
                )
            else:
                self._fail_reason = old_reason
            return False

        # do the full check on expected exceptions
        if not self._check_exceptions(
            exception,
            actual_exceptions,
        ):
            self._fail_reason = cast(str, self._fail_reason)
            assert self._fail_reason is not None
            old_reason = self._fail_reason
            # if we're not expecting a nested structure, and there is one, do a second
            # pass where we try flattening it
            if (
                not self.flatten_subgroups
                and not any(
                    isinstance(e, RaisesGroup) for e in self.expected_exceptions
                )
                and any(isinstance(e, BaseExceptionGroup) for e in actual_exceptions)
                and self._check_exceptions(
                    exception,
                    self._unroll_exceptions(exception.exceptions),
                )
            ):
                # only indent if it's a single-line reason. In a multi-line there's already
                # indented lines that this does not belong to.
                indent = " " if "\n" not in self._fail_reason else ""
                self._fail_reason = (
                    old_reason
                    + f"\n{indent}Did you mean to use `flatten_subgroups=True`?"
                )
            else:
                self._fail_reason = old_reason
            return False

        # Only run `self.check` once we know `exception` is of the correct type.
        if not self._check_check(exception):
            reason = (
                cast(str, self._fail_reason) + f" on the {type(exception).__name__}"
            )
            # If `check` would have passed on the single contained exception, suggest
            # moving it into a nested RaisesExc.
            if (
                len(actual_exceptions) == len(self.expected_exceptions) == 1
                and isinstance(expected := self.expected_exceptions[0], type)
                # we explicitly break typing here :)
                and self._check_check(actual_exceptions[0])  # type: ignore[arg-type]
            ):
                self._fail_reason = reason + (
                    f", but did return True for the expected {self._repr_expected(expected)}."
                    f" You might want RaisesGroup(RaisesExc({expected.__name__}, check=<...>))"
                )
            else:
                self._fail_reason = reason
            return False

        return True
Check if an exception matches the requirements of this RaisesGroup.
If it fails, RaisesGroup.fail_reason will be set.
Example::
with pytest.raises(TypeError) as excinfo:
...
assert RaisesGroup(ValueError).matches(excinfo.value.__cause__)
# the above line is equivalent to
myexc = excinfo.value.__cause__
assert isinstance(myexc, BaseExceptionGroup)
assert len(myexc.exceptions) == 1
assert isinstance(myexc.exceptions[0], ValueError)
def expected_type(self) -> str:
    """Return a readable description of the expected (Base)ExceptionGroup.

    Each expected entry is rendered recursively: ``RaisesExc`` by its repr,
    a nested ``RaisesGroup`` by its own ``expected_type()``, and a plain
    exception class by its name.
    """

    def _describe(expected: object) -> str:
        # One textual fragment per expected entry; order of checks matters
        # (RaisesExc/RaisesGroup instances before bare exception classes).
        if isinstance(expected, RaisesExc):
            return repr(expected)
        if isinstance(expected, RaisesGroup):
            return expected.expected_type()
        if isinstance(expected, type):
            return expected.__name__
        raise AssertionError("unknown type")  # pragma: no cover

    prefix = "Base" if self.is_baseexception else ""
    body = ", ".join(_describe(e) for e in self.expected_exceptions)
    return f"{prefix}ExceptionGroup({body})"
@final
class RecordedHookCall:
    """A recorded call to a hook.

    The arguments to the hook call are set as attributes.
    For example:

    .. code-block:: python

        calls = hook_recorder.getcalls("pytest_runtest_setup")
        # Suppose pytest_runtest_setup was called once with `item=an_item`.
        assert calls[0].item is an_item
    """

    def __init__(self, name: str, kwargs) -> None:
        # Expose every hook argument directly as an instance attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)
        self._name = name

    def __repr__(self) -> str:
        # Show all recorded hook arguments, but not the internal name slot.
        args = {k: v for k, v in self.__dict__.items() if k != "_name"}
        return f"<RecordedHookCall {self._name!r}(**{args!r})>"

    if TYPE_CHECKING:
        # The class has undetermined attributes, this tells mypy about it.
        def __getattr__(self, key: str): ...
A recorded call to a hook.
The arguments to the hook call are set as attributes. For example:
calls = hook_recorder.getcalls("pytest_runtest_setup")
# Suppose pytest_runtest_setup was called once with `item=an_item`.
assert calls[0].item is an_item
@final
class RunResult:
    """The result of running a command from :class:`~pytest.Pytester`."""

    def __init__(
        self,
        ret: int | ExitCode,
        outlines: list[str],
        errlines: list[str],
        duration: float,
    ) -> None:
        try:
            # Map known return codes onto the ExitCode enum; unknown codes
            # raise ValueError and are kept as the raw int below.
            self.ret: int | ExitCode = ExitCode(ret)
            """The return value."""
        except ValueError:
            self.ret = ret
        self.outlines = outlines
        """List of lines captured from stdout."""
        self.errlines = errlines
        """List of lines captured from stderr."""
        self.stdout = LineMatcher(outlines)
        """:class:`~pytest.LineMatcher` of stdout.

        Use e.g. :func:`str(stdout) <pytest.LineMatcher.__str__()>` to reconstruct stdout, or the commonly used
        :func:`stdout.fnmatch_lines() <pytest.LineMatcher.fnmatch_lines()>` method.
        """
        self.stderr = LineMatcher(errlines)
        """:class:`~pytest.LineMatcher` of stderr."""
        self.duration = duration
        """Duration in seconds."""

    def __repr__(self) -> str:
        return (
            f"<RunResult ret={self.ret!s} "
            f"len(stdout.lines)={len(self.stdout.lines)} "
            f"len(stderr.lines)={len(self.stderr.lines)} "
            f"duration={self.duration:.2f}s>"
        )

    def parseoutcomes(self) -> dict[str, int]:
        """Return a dictionary of outcome noun -> count from parsing the terminal
        output that the test process produced.

        The returned nouns will always be in plural form::

            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====

        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
        """
        return self.parse_summary_nouns(self.outlines)

    @classmethod
    def parse_summary_nouns(cls, lines) -> dict[str, int]:
        """Extract the nouns from a pytest terminal summary line.

        It always returns the plural noun for consistency::

            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====

        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.

        :raises ValueError: if no summary line is found in ``lines``.
        """
        # Scan from the end: the summary is the last line with a duration.
        for line in reversed(lines):
            if rex_session_duration.search(line):
                outcomes = rex_outcome.findall(line)
                ret = {noun: int(count) for (count, noun) in outcomes}
                break
        else:
            raise ValueError("Pytest terminal summary report not found")

        # Normalize the singular nouns to plural for a consistent API.
        to_plural = {
            "warning": "warnings",
            "error": "errors",
        }
        return {to_plural.get(k, k): v for k, v in ret.items()}

    def assert_outcomes(
        self,
        passed: int = 0,
        skipped: int = 0,
        failed: int = 0,
        errors: int = 0,
        xpassed: int = 0,
        xfailed: int = 0,
        warnings: int | None = None,
        deselected: int | None = None,
    ) -> None:
        """
        Assert that the specified outcomes appear with the respective
        numbers (0 means it didn't occur) in the text output from a test run.

        ``warnings`` and ``deselected`` are only checked if not None.
        """
        # Hide this frame from pytest tracebacks so failures point at the caller.
        __tracebackhide__ = True
        from _pytest.pytester_assertions import assert_outcomes

        outcomes = self.parseoutcomes()
        assert_outcomes(
            outcomes,
            passed=passed,
            skipped=skipped,
            failed=failed,
            errors=errors,
            xpassed=xpassed,
            xfailed=xfailed,
            warnings=warnings,
            deselected=deselected,
        )
The result of running a command from ~pytest.Pytester.
def __init__(
    self,
    ret: int | ExitCode,
    outlines: list[str],
    errlines: list[str],
    duration: float,
) -> None:
    try:
        # Map known return codes onto the ExitCode enum; unknown codes
        # raise ValueError and are kept as the raw int below.
        self.ret: int | ExitCode = ExitCode(ret)
        """The return value."""
    except ValueError:
        self.ret = ret
    self.outlines = outlines
    """List of lines captured from stdout."""
    self.errlines = errlines
    """List of lines captured from stderr."""
    self.stdout = LineMatcher(outlines)
    """:class:`~pytest.LineMatcher` of stdout.

    Use e.g. :func:`str(stdout) <pytest.LineMatcher.__str__()>` to reconstruct stdout, or the commonly used
    :func:`stdout.fnmatch_lines() <pytest.LineMatcher.fnmatch_lines()>` method.
    """
    self.stderr = LineMatcher(errlines)
    """:class:`~pytest.LineMatcher` of stderr."""
    self.duration = duration
    """Duration in seconds."""
~pytest.LineMatcher of stdout.
Use e.g. str(stdout) to reconstruct stdout, or the commonly used
stdout.fnmatch_lines() method.
def parseoutcomes(self) -> dict[str, int]:
    """Parse the terminal output produced by the test process and return a
    mapping of outcome noun -> count.

    The returned nouns will always be in plural form::

        ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====

    Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
    """
    # Delegate to the classmethod that does the actual line parsing.
    return type(self).parse_summary_nouns(self.outlines)
Return a dictionary of outcome noun -> count from parsing the terminal output that the test process produced.
The returned nouns will always be in plural form::
======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
Will return {"failed": 1, "passed": 1, "warnings": 1, "errors": 1}.
@classmethod
def parse_summary_nouns(cls, lines) -> dict[str, int]:
    """Extract the outcome nouns from a pytest terminal summary line.

    It always returns the plural noun for consistency::

        ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====

    Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.

    :raises ValueError: if no summary line is found in ``lines``.
    """
    # The summary is the last line that reports a session duration.
    summary = None
    for candidate in reversed(lines):
        if rex_session_duration.search(candidate):
            summary = candidate
            break
    if summary is None:
        raise ValueError("Pytest terminal summary report not found")

    counts = {noun: int(count) for count, noun in rex_outcome.findall(summary)}
    # Normalize the singular nouns pytest emits to their plural forms.
    plural = {"warning": "warnings", "error": "errors"}
    return {plural.get(noun, noun): count for noun, count in counts.items()}
Extract the nouns from a pytest terminal summary line.
It always returns the plural noun for consistency::
======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
Will return {"failed": 1, "passed": 1, "warnings": 1, "errors": 1}.
def assert_outcomes(
    self,
    passed: int = 0,
    skipped: int = 0,
    failed: int = 0,
    errors: int = 0,
    xpassed: int = 0,
    xfailed: int = 0,
    warnings: int | None = None,
    deselected: int | None = None,
) -> None:
    """
    Assert that the specified outcomes appear with the respective
    numbers (0 means it didn't occur) in the text output from a test run.

    ``warnings`` and ``deselected`` are only checked if not None.
    """
    # Hide this frame from pytest tracebacks so failures point at the caller.
    __tracebackhide__ = True
    # NOTE(review): local import — presumably deferred to avoid a module-level
    # dependency cycle; confirm before moving to the top of the file.
    from _pytest.pytester_assertions import assert_outcomes

    outcomes = self.parseoutcomes()
    assert_outcomes(
        outcomes,
        passed=passed,
        skipped=skipped,
        failed=failed,
        errors=errors,
        xpassed=xpassed,
        xfailed=xfailed,
        warnings=warnings,
        deselected=deselected,
    )
Assert that the specified outcomes appear with the respective numbers (0 means it didn't occur) in the text output from a test run.
warnings and deselected are only checked if not None.
@final
class Session(nodes.Collector):
    """The root of the collection tree.

    ``Session`` collects the initial paths given as arguments to pytest.
    """

    Interrupted = Interrupted
    Failed = Failed
    # Set on the session by runner.pytest_sessionstart.
    _setupstate: SetupState
    # Set on the session by fixtures.pytest_sessionstart.
    _fixturemanager: FixtureManager
    exitstatus: int | ExitCode

    def __init__(self, config: Config) -> None:
        # The session is the root collector: empty name/nodeid, no parent.
        super().__init__(
            name="",
            path=config.rootpath,
            fspath=None,
            parent=None,
            config=config,
            session=self,
            nodeid="",
        )
        self.testsfailed = 0
        self.testscollected = 0
        self._shouldstop: bool | str = False
        self._shouldfail: bool | str = False
        self.trace = config.trace.root.get("collection")
        self._initialpaths: frozenset[Path] = frozenset()
        self._initialpaths_with_parents: frozenset[Path] = frozenset()
        self._notfound: list[tuple[str, Sequence[nodes.Collector]]] = []
        self._initial_parts: list[CollectionArgument] = []
        self._collection_cache: dict[nodes.Collector, CollectReport] = {}
        self.items: list[nodes.Item] = []

        # Cache for bestrelpath results (bestrelpath is a quite slow function).
        self._bestrelpathcache: dict[Path, str] = _bestrelpath_cache(config.rootpath)

        self.config.pluginmanager.register(self, name="session")

    @classmethod
    def from_config(cls, config: Config) -> Session:
        """Alternate constructor: build a Session for the given Config."""
        session: Session = cls._create(config=config)
        return session

    def __repr__(self) -> str:
        # exitstatus may not be set yet, hence the %-formatting with getattr.
        return (
            f"<{self.__class__.__name__} {self.name} "
            f"exitstatus=%r "
            f"testsfailed={self.testsfailed} "
            f"testscollected={self.testscollected}>"
        ) % getattr(self, "exitstatus", "<UNSET>")

    @property
    def shouldstop(self) -> bool | str:
        return self._shouldstop

    @shouldstop.setter
    def shouldstop(self, value: bool | str) -> None:
        # The runner checks shouldstop and assumes that if it is set we are
        # definitely stopping, so prevent unsetting it.
        if value is False and self._shouldstop:
            warnings.warn(
                PytestWarning(
                    "session.shouldstop cannot be unset after it has been set; ignoring."
                ),
                stacklevel=2,
            )
            return
        self._shouldstop = value

    @property
    def shouldfail(self) -> bool | str:
        return self._shouldfail

    @shouldfail.setter
    def shouldfail(self, value: bool | str) -> None:
        # The runner checks shouldfail and assumes that if it is set we are
        # definitely stopping, so prevent unsetting it.
        if value is False and self._shouldfail:
            warnings.warn(
                PytestWarning(
                    "session.shouldfail cannot be unset after it has been set; ignoring."
                ),
                stacklevel=2,
            )
            return
        self._shouldfail = value

    @property
    def startpath(self) -> Path:
        """The path from which pytest was invoked.

        .. versionadded:: 7.0.0
        """
        return self.config.invocation_params.dir

    def _node_location_to_relpath(self, node_path: Path) -> str:
        # bestrelpath is a quite slow function.
        return self._bestrelpathcache[node_path]

    @hookimpl(tryfirst=True)
    def pytest_collectstart(self) -> None:
        # Abort collection promptly once a stop/fail condition has been set.
        if self.shouldfail:
            raise self.Failed(self.shouldfail)
        if self.shouldstop:
            raise self.Interrupted(self.shouldstop)

    @hookimpl(tryfirst=True)
    def pytest_runtest_logreport(self, report: TestReport | CollectReport) -> None:
        # Count failures (ignoring xfail reports) and turn the --maxfail
        # threshold into a session-stopping condition.
        if report.failed and not hasattr(report, "wasxfail"):
            self.testsfailed += 1
            maxfail = self.config.getvalue("maxfail")
            if maxfail and self.testsfailed >= maxfail:
                self.shouldfail = f"stopping after {self.testsfailed} failures"

    pytest_collectreport = pytest_runtest_logreport

    def isinitpath(
        self,
        path: str | os.PathLike[str],
        *,
        with_parents: bool = False,
    ) -> bool:
        """Is path an initial path?

        An initial path is a path explicitly given to pytest on the command
        line.

        :param with_parents:
            If set, also return True if the path is a parent of an initial path.

        .. versionchanged:: 8.0
            Added the ``with_parents`` parameter.
        """
        # Optimization: Path(Path(...)) is much slower than isinstance.
        path_ = path if isinstance(path, Path) else Path(path)
        if with_parents:
            return path_ in self._initialpaths_with_parents
        else:
            return path_ in self._initialpaths

    def gethookproxy(self, fspath: os.PathLike[str]) -> pluggy.HookRelay:
        """Return a hook relay for *fspath*, excluding conftest plugins
        that are not active at that path."""
        # Optimization: Path(Path(...)) is much slower than isinstance.
        path = fspath if isinstance(fspath, Path) else Path(fspath)
        pm = self.config.pluginmanager
        # Check if we have the common case of running
        # hooks with all conftest.py files.
        my_conftestmodules = pm._getconftestmodules(path)
        remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
        proxy: pluggy.HookRelay
        if remove_mods:
            # One or more conftests are not in use at this path.
            proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods))  # type: ignore[arg-type,assignment]
        else:
            # All plugins are active for this fspath.
            proxy = self.config.hook
        return proxy

    def _collect_path(
        self,
        path: Path,
        path_cache: dict[Path, Sequence[nodes.Collector]],
    ) -> Sequence[nodes.Collector]:
        """Create a Collector for the given path.

        `path_cache` makes it so the same Collectors are returned for the same
        path.
        """
        if path in path_cache:
            return path_cache[path]

        if path.is_dir():
            ihook = self.gethookproxy(path.parent)
            col: nodes.Collector | None = ihook.pytest_collect_directory(
                path=path, parent=self
            )
            cols: Sequence[nodes.Collector] = (col,) if col is not None else ()

        elif path.is_file():
            ihook = self.gethookproxy(path)
            cols = ihook.pytest_collect_file(file_path=path, parent=self)

        else:
            # Broken symlink or invalid/missing file.
            cols = ()

        path_cache[path] = cols
        return cols

    @overload
    def perform_collect(
        self, args: Sequence[str] | None = ..., genitems: Literal[True] = ...
    ) -> Sequence[nodes.Item]: ...

    @overload
    def perform_collect(
        self, args: Sequence[str] | None = ..., genitems: bool = ...
    ) -> Sequence[nodes.Item | nodes.Collector]: ...

    def perform_collect(
        self, args: Sequence[str] | None = None, genitems: bool = True
    ) -> Sequence[nodes.Item | nodes.Collector]:
        """Perform the collection phase for this session.

        This is called by the default :hook:`pytest_collection` hook
        implementation; see the documentation of this hook for more details.
        For testing purposes, it may also be called directly on a fresh
        ``Session``.

        This function normally recursively expands any collectors collected
        from the session to their items, and only items are returned. For
        testing purposes, this may be suppressed by passing ``genitems=False``,
        in which case the return value contains these collectors unexpanded,
        and ``session.items`` is empty.
        """
        if args is None:
            args = self.config.args

        self.trace("perform_collect", self, args)
        self.trace.root.indent += 1

        hook = self.config.hook

        # Reset transient state from any previous collection run.
        self._notfound = []
        self._initial_parts = []
        self._collection_cache = {}
        self.items = []
        items: Sequence[nodes.Item | nodes.Collector] = self.items
        consider_namespace_packages: bool = self.config.getini(
            "consider_namespace_packages"
        )
        try:
            initialpaths: list[Path] = []
            initialpaths_with_parents: list[Path] = []

            collection_args = [
                resolve_collection_argument(
                    self.config.invocation_params.dir,
                    arg,
                    i,
                    as_pypath=self.config.option.pyargs,
                    consider_namespace_packages=consider_namespace_packages,
                )
                for i, arg in enumerate(args)
            ]

            if not self.config.getoption("keepduplicates"):
                # Normalize the collection arguments -- remove duplicates and overlaps.
                self._initial_parts = normalize_collection_arguments(collection_args)
            else:
                self._initial_parts = collection_args

            for collection_argument in self._initial_parts:
                initialpaths.append(collection_argument.path)
                initialpaths_with_parents.append(collection_argument.path)
                initialpaths_with_parents.extend(collection_argument.path.parents)
            self._initialpaths = frozenset(initialpaths)
            self._initialpaths_with_parents = frozenset(initialpaths_with_parents)

            rep = collect_one_node(self)
            self.ihook.pytest_collectreport(report=rep)
            self.trace.root.indent -= 1
            if self._notfound:
                errors = []
                for arg, collectors in self._notfound:
                    if collectors:
                        errors.append(
                            f"not found: {arg}\n(no match in any of {collectors!r})"
                        )
                    else:
                        errors.append(f"found no collectors for {arg}")

                raise UsageError(*errors)

            if not genitems:
                items = rep.result
            else:
                if rep.passed:
                    for node in rep.result:
                        self.items.extend(self.genitems(node))

            self.config.pluginmanager.check_pending()
            hook.pytest_collection_modifyitems(
                session=self, config=self.config, items=items
            )
        finally:
            # Always clear transient state and let plugins finalize.
            self._notfound = []
            self._initial_parts = []
            self._collection_cache = {}
            hook.pytest_collection_finish(session=self)

        if genitems:
            self.testscollected = len(items)

        return items

    def _collect_one_node(
        self,
        node: nodes.Collector,
        handle_dupes: bool = True,
    ) -> tuple[CollectReport, bool]:
        """Collect a single node, using the session cache when allowed.

        Returns ``(report, was_cached)``.
        """
        if node in self._collection_cache and handle_dupes:
            rep = self._collection_cache[node]
            return rep, True
        else:
            rep = collect_one_node(node)
            self._collection_cache[node] = rep
            return rep, False

    def collect(self) -> Iterator[nodes.Item | nodes.Collector]:
        # This is a cache for the root directories of the initial paths.
        # We can't use collection_cache for Session because of its special
        # role as the bootstrapping collector.
        path_cache: dict[Path, Sequence[nodes.Collector]] = {}

        pm = self.config.pluginmanager

        for collection_argument in self._initial_parts:
            self.trace("processing argument", collection_argument)
            self.trace.root.indent += 1

            argpath = collection_argument.path
            names = collection_argument.parts
            parametrization = collection_argument.parametrization
            module_name = collection_argument.module_name

            # resolve_collection_argument() ensures this.
            if argpath.is_dir():
                assert not names, f"invalid arg {(argpath, names)!r}"

            paths = [argpath]
            # Add relevant parents of the path, from the root, e.g.
            # /a/b/c.py -> [/, /a, /a/b, /a/b/c.py]
            if module_name is None:
                # Paths outside of the confcutdir should not be considered.
                for path in argpath.parents:
                    if not pm._is_in_confcutdir(path):
                        break
                    paths.insert(0, path)
            else:
                # For --pyargs arguments, only consider paths matching the module
                # name. Paths beyond the package hierarchy are not included.
                module_name_parts = module_name.split(".")
                for i, path in enumerate(argpath.parents, 2):
                    if i > len(module_name_parts) or path.stem != module_name_parts[-i]:
                        break
                    paths.insert(0, path)

            # Start going over the parts from the root, collecting each level
            # and discarding all nodes which don't match the level's part.
            any_matched_in_initial_part = False
            notfound_collectors = []
            work: list[tuple[nodes.Collector | nodes.Item, list[Path | str]]] = [
                (self, [*paths, *names])
            ]
            while work:
                matchnode, matchparts = work.pop()

                # Pop'd all of the parts, this is a match.
                if not matchparts:
                    yield matchnode
                    any_matched_in_initial_part = True
                    continue

                # Should have been matched by now, discard.
                if not isinstance(matchnode, nodes.Collector):
                    continue

                # Collect this level of matching.
                # Collecting Session (self) is done directly to avoid endless
                # recursion to this function.
                subnodes: Sequence[nodes.Collector | nodes.Item]
                if isinstance(matchnode, Session):
                    assert isinstance(matchparts[0], Path)
                    subnodes = matchnode._collect_path(matchparts[0], path_cache)
                else:
                    # For backward compat, files given directly multiple
                    # times on the command line should not be deduplicated.
                    handle_dupes = not (
                        len(matchparts) == 1
                        and isinstance(matchparts[0], Path)
                        and matchparts[0].is_file()
                    )
                    rep, duplicate = self._collect_one_node(matchnode, handle_dupes)
                    if not duplicate and not rep.passed:
                        # Report collection failures here to avoid failing to
                        # run some test specified in the command line because
                        # the module could not be imported (#134).
                        matchnode.ihook.pytest_collectreport(report=rep)
                    if not rep.passed:
                        continue
                    subnodes = rep.result

                # Prune this level.
                any_matched_in_collector = False
                for node in reversed(subnodes):
                    # Path part e.g. `/a/b/` in `/a/b/test_file.py::TestIt::test_it`.
                    if isinstance(matchparts[0], Path):
                        is_match = node.path == matchparts[0]
                        if sys.platform == "win32" and not is_match:
                            # In case the file paths do not match, fallback to samefile() to
                            # account for short-paths on Windows (#11895). But use a version
                            # which doesn't resolve symlinks, otherwise we might match the
                            # same file more than once (#12039).
                            is_match = samefile_nofollow(node.path, matchparts[0])

                    # Name part e.g. `TestIt` in `/a/b/test_file.py::TestIt::test_it`.
                    else:
                        if len(matchparts) == 1:
                            # This the last part, one parametrization goes.
                            if parametrization is not None:
                                # A parametrized arg must match exactly.
                                is_match = node.name == matchparts[0] + parametrization
                            else:
                                # A non-parameterized arg matches all parametrizations (if any).
                                # TODO: Remove the hacky split once the collection structure
                                # contains parametrization.
                                is_match = node.name.split("[")[0] == matchparts[0]
                        else:
                            is_match = node.name == matchparts[0]
                    if is_match:
                        work.append((node, matchparts[1:]))
                        any_matched_in_collector = True

                if not any_matched_in_collector:
                    notfound_collectors.append(matchnode)

            if not any_matched_in_initial_part:
                report_arg = "::".join((str(argpath), *names))
                self._notfound.append((report_arg, notfound_collectors))

            self.trace.root.indent -= 1

    def genitems(self, node: nodes.Item | nodes.Collector) -> Iterator[nodes.Item]:
        # Recursively expand a collector into the items it contains,
        # yielding each item; collection reports are posted the first time
        # a collector is collected.
        self.trace("genitems", node)
        if isinstance(node, nodes.Item):
            node.ihook.pytest_itemcollected(item=node)
            yield node
        else:
            assert isinstance(node, nodes.Collector)
            # For backward compat, dedup only applies to files.
            handle_dupes = not isinstance(node, nodes.File)
            rep, duplicate = self._collect_one_node(node, handle_dupes)
            if rep.passed:
                for subnode in rep.result:
                    yield from self.genitems(subnode)
            if not duplicate:
                node.ihook.pytest_collectreport(report=rep)
The root of the collection tree.
Session collects the initial paths given as arguments to pytest.
def __init__(self, config: Config) -> None:
    # The session is the root collector: empty name/nodeid, no parent.
    super().__init__(
        name="",
        path=config.rootpath,
        fspath=None,
        parent=None,
        config=config,
        session=self,
        nodeid="",
    )
    self.testsfailed = 0
    self.testscollected = 0
    self._shouldstop: bool | str = False
    self._shouldfail: bool | str = False
    self.trace = config.trace.root.get("collection")
    self._initialpaths: frozenset[Path] = frozenset()
    self._initialpaths_with_parents: frozenset[Path] = frozenset()
    self._notfound: list[tuple[str, Sequence[nodes.Collector]]] = []
    self._initial_parts: list[CollectionArgument] = []
    self._collection_cache: dict[nodes.Collector, CollectReport] = {}
    self.items: list[nodes.Item] = []

    # Cache for bestrelpath results (bestrelpath is a quite slow function).
    self._bestrelpathcache: dict[Path, str] = _bestrelpath_cache(config.rootpath)

    self.config.pluginmanager.register(self, name="session")
@property
def startpath(self) -> Path:
    """The path from which pytest was invoked.

    .. versionadded:: 7.0.0
    """
    # Read from the invocation parameters recorded on the Config.
    return self.config.invocation_params.dir
The path from which pytest was invoked.
New in version 7.0.0.
@hookimpl(tryfirst=True)
def pytest_runtest_logreport(self, report: TestReport | CollectReport) -> None:
    # Count failures (ignoring xfail reports) and turn the --maxfail
    # threshold into a session-stopping condition.
    if report.failed and not hasattr(report, "wasxfail"):
        self.testsfailed += 1
        maxfail = self.config.getvalue("maxfail")
        if maxfail and self.testsfailed >= maxfail:
            self.shouldfail = f"stopping after {self.testsfailed} failures"
@hookimpl(tryfirst=True)
def pytest_runtest_logreport(self, report: TestReport | CollectReport) -> None:
    # Count failures (ignoring xfail reports) and turn the --maxfail
    # threshold into a session-stopping condition.
    if report.failed and not hasattr(report, "wasxfail"):
        self.testsfailed += 1
        maxfail = self.config.getvalue("maxfail")
        if maxfail and self.testsfailed >= maxfail:
            self.shouldfail = f"stopping after {self.testsfailed} failures"
def isinitpath(
    self,
    path: str | os.PathLike[str],
    *,
    with_parents: bool = False,
) -> bool:
    """Is path an initial path?

    An initial path is a path explicitly given to pytest on the command
    line.

    :param with_parents:
        If set, also return True if the path is a parent of an initial path.

    .. versionchanged:: 8.0
        Added the ``with_parents`` parameter.
    """
    # Optimization: Path(Path(...)) is much slower than isinstance.
    path_ = path if isinstance(path, Path) else Path(path)
    # Membership check against the frozensets built during collection.
    if with_parents:
        return path_ in self._initialpaths_with_parents
    else:
        return path_ in self._initialpaths
Is path an initial path?
An initial path is a path explicitly given to pytest on the command line.
Parameters
- with_parents: If set, also return True if the path is a parent of an initial path.
Changed in version 8.0:
Added the with_parents parameter.
def gethookproxy(self, fspath: os.PathLike[str]) -> pluggy.HookRelay:
    """Return a hook relay for *fspath*, excluding conftest plugins
    that are not active at that path."""
    # Optimization: Path(Path(...)) is much slower than isinstance.
    path = fspath if isinstance(fspath, Path) else Path(fspath)
    pm = self.config.pluginmanager
    # Check if we have the common case of running
    # hooks with all conftest.py files.
    my_conftestmodules = pm._getconftestmodules(path)
    remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
    proxy: pluggy.HookRelay
    if remove_mods:
        # One or more conftests are not in use at this path.
        proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods))  # type: ignore[arg-type,assignment]
    else:
        # All plugins are active for this fspath.
        proxy = self.config.hook
    return proxy
def perform_collect(
    self, args: Sequence[str] | None = None, genitems: bool = True
) -> Sequence[nodes.Item | nodes.Collector]:
    """Perform the collection phase for this session.

    This is called by the default :hook:`pytest_collection` hook
    implementation; see the documentation of this hook for more details.
    For testing purposes, it may also be called directly on a fresh
    ``Session``.

    This function normally recursively expands any collectors collected
    from the session to their items, and only items are returned. For
    testing purposes, this may be suppressed by passing ``genitems=False``,
    in which case the return value contains these collectors unexpanded,
    and ``session.items`` is empty.
    """
    if args is None:
        args = self.config.args

    self.trace("perform_collect", self, args)
    self.trace.root.indent += 1

    hook = self.config.hook

    # Reset transient state from any previous collection run.
    self._notfound = []
    self._initial_parts = []
    self._collection_cache = {}
    self.items = []
    items: Sequence[nodes.Item | nodes.Collector] = self.items
    consider_namespace_packages: bool = self.config.getini(
        "consider_namespace_packages"
    )
    try:
        initialpaths: list[Path] = []
        initialpaths_with_parents: list[Path] = []

        collection_args = [
            resolve_collection_argument(
                self.config.invocation_params.dir,
                arg,
                i,
                as_pypath=self.config.option.pyargs,
                consider_namespace_packages=consider_namespace_packages,
            )
            for i, arg in enumerate(args)
        ]

        if not self.config.getoption("keepduplicates"):
            # Normalize the collection arguments -- remove duplicates and overlaps.
            self._initial_parts = normalize_collection_arguments(collection_args)
        else:
            self._initial_parts = collection_args

        for collection_argument in self._initial_parts:
            initialpaths.append(collection_argument.path)
            initialpaths_with_parents.append(collection_argument.path)
            initialpaths_with_parents.extend(collection_argument.path.parents)
        self._initialpaths = frozenset(initialpaths)
        self._initialpaths_with_parents = frozenset(initialpaths_with_parents)

        rep = collect_one_node(self)
        self.ihook.pytest_collectreport(report=rep)
        self.trace.root.indent -= 1
        if self._notfound:
            errors = []
            for arg, collectors in self._notfound:
                if collectors:
                    errors.append(
                        f"not found: {arg}\n(no match in any of {collectors!r})"
                    )
                else:
                    errors.append(f"found no collectors for {arg}")

            raise UsageError(*errors)

        if not genitems:
            items = rep.result
        else:
            if rep.passed:
                for node in rep.result:
                    self.items.extend(self.genitems(node))

        self.config.pluginmanager.check_pending()
        hook.pytest_collection_modifyitems(
            session=self, config=self.config, items=items
        )
    finally:
        # Always clear transient state and let plugins finalize.
        self._notfound = []
        self._initial_parts = []
        self._collection_cache = {}
        hook.pytest_collection_finish(session=self)

    if genitems:
        self.testscollected = len(items)

    return items
Perform the collection phase for this session.
This is called by the default pytest_collection hook
implementation; see the documentation of this hook for more details.
For testing purposes, it may also be called directly on a fresh
Session.
This function normally recursively expands any collectors collected
from the session to their items, and only items are returned. For
testing purposes, this may be suppressed by passing genitems=False,
in which case the return value contains these collectors unexpanded,
and session.items is empty.
    def collect(self) -> Iterator[nodes.Item | nodes.Collector]:
        """Collect the nodes matching the session's initial parts (the
        resolved command-line arguments), yielding only matching nodes."""
        # This is a cache for the root directories of the initial paths.
        # We can't use collection_cache for Session because of its special
        # role as the bootstrapping collector.
        path_cache: dict[Path, Sequence[nodes.Collector]] = {}

        pm = self.config.pluginmanager

        for collection_argument in self._initial_parts:
            self.trace("processing argument", collection_argument)
            self.trace.root.indent += 1

            argpath = collection_argument.path
            names = collection_argument.parts
            parametrization = collection_argument.parametrization
            module_name = collection_argument.module_name

            # resolve_collection_argument() ensures this.
            if argpath.is_dir():
                assert not names, f"invalid arg {(argpath, names)!r}"

            paths = [argpath]
            # Add relevant parents of the path, from the root, e.g.
            # /a/b/c.py -> [/, /a, /a/b, /a/b/c.py]
            if module_name is None:
                # Paths outside of the confcutdir should not be considered.
                for path in argpath.parents:
                    if not pm._is_in_confcutdir(path):
                        break
                    paths.insert(0, path)
            else:
                # For --pyargs arguments, only consider paths matching the module
                # name. Paths beyond the package hierarchy are not included.
                module_name_parts = module_name.split(".")
                for i, path in enumerate(argpath.parents, 2):
                    if i > len(module_name_parts) or path.stem != module_name_parts[-i]:
                        break
                    paths.insert(0, path)

            # Start going over the parts from the root, collecting each level
            # and discarding all nodes which don't match the level's part.
            any_matched_in_initial_part = False
            notfound_collectors = []
            # Depth-first worklist of (candidate node, remaining parts to match).
            work: list[tuple[nodes.Collector | nodes.Item, list[Path | str]]] = [
                (self, [*paths, *names])
            ]
            while work:
                matchnode, matchparts = work.pop()

                # Pop'd all of the parts, this is a match.
                if not matchparts:
                    yield matchnode
                    any_matched_in_initial_part = True
                    continue

                # Should have been matched by now, discard.
                if not isinstance(matchnode, nodes.Collector):
                    continue

                # Collect this level of matching.
                # Collecting Session (self) is done directly to avoid endless
                # recursion to this function.
                subnodes: Sequence[nodes.Collector | nodes.Item]
                if isinstance(matchnode, Session):
                    assert isinstance(matchparts[0], Path)
                    subnodes = matchnode._collect_path(matchparts[0], path_cache)
                else:
                    # For backward compat, files given directly multiple
                    # times on the command line should not be deduplicated.
                    handle_dupes = not (
                        len(matchparts) == 1
                        and isinstance(matchparts[0], Path)
                        and matchparts[0].is_file()
                    )
                    rep, duplicate = self._collect_one_node(matchnode, handle_dupes)
                    if not duplicate and not rep.passed:
                        # Report collection failures here to avoid failing to
                        # run some test specified in the command line because
                        # the module could not be imported (#134).
                        matchnode.ihook.pytest_collectreport(report=rep)
                    if not rep.passed:
                        continue
                    subnodes = rep.result

                # Prune this level.
                any_matched_in_collector = False
                for node in reversed(subnodes):
                    # Path part e.g. `/a/b/` in `/a/b/test_file.py::TestIt::test_it`.
                    if isinstance(matchparts[0], Path):
                        is_match = node.path == matchparts[0]
                        if sys.platform == "win32" and not is_match:
                            # In case the file paths do not match, fallback to samefile() to
                            # account for short-paths on Windows (#11895). But use a version
                            # which doesn't resolve symlinks, otherwise we might match the
                            # same file more than once (#12039).
                            is_match = samefile_nofollow(node.path, matchparts[0])

                    # Name part e.g. `TestIt` in `/a/b/test_file.py::TestIt::test_it`.
                    else:
                        if len(matchparts) == 1:
                            # This the last part, one parametrization goes.
                            if parametrization is not None:
                                # A parametrized arg must match exactly.
                                is_match = node.name == matchparts[0] + parametrization
                            else:
                                # A non-parameterized arg matches all parametrizations (if any).
                                # TODO: Remove the hacky split once the collection structure
                                # contains parametrization.
                                is_match = node.name.split("[")[0] == matchparts[0]
                        else:
                            is_match = node.name == matchparts[0]
                    if is_match:
                        work.append((node, matchparts[1:]))
                        any_matched_in_collector = True

                if not any_matched_in_collector:
                    notfound_collectors.append(matchnode)

            # Record the miss so perform_collect() can raise UsageError.
            if not any_matched_in_initial_part:
                report_arg = "::".join((str(argpath), *names))
                self._notfound.append((report_arg, notfound_collectors))

            self.trace.root.indent -= 1
Collect children (items and collectors) for this collector.
1012 def genitems(self, node: nodes.Item | nodes.Collector) -> Iterator[nodes.Item]: 1013 self.trace("genitems", node) 1014 if isinstance(node, nodes.Item): 1015 node.ihook.pytest_itemcollected(item=node) 1016 yield node 1017 else: 1018 assert isinstance(node, nodes.Collector) 1019 # For backward compat, dedup only applies to files. 1020 handle_dupes = not isinstance(node, nodes.File) 1021 rep, duplicate = self._collect_one_node(node, handle_dupes) 1022 if rep.passed: 1023 for subnode in rep.result: 1024 yield from self.genitems(subnode) 1025 if not duplicate: 1026 node.ihook.pytest_collectreport(report=rep)
Signals that the test run was interrupted.
Signals a stop as failed test run.
class Stash:
    r"""``Stash`` is a type-safe heterogeneous mutable mapping that
    allows keys and value types to be defined separately from
    where it (the ``Stash``) is created.

    Usually you will be given an object which has a ``Stash``, for example
    :class:`~pytest.Config` or a :class:`~_pytest.nodes.Node`:

    .. code-block:: python

        stash: Stash = some_object.stash

    If a module or plugin wants to store data in this ``Stash``, it creates
    :class:`StashKey`\s for its keys (at the module level):

    .. code-block:: python

        # At the top-level of the module
        some_str_key = StashKey[str]()
        some_bool_key = StashKey[bool]()

    To store information:

    .. code-block:: python

        # Value type must match the key.
        stash[some_str_key] = "value"
        stash[some_bool_key] = True

    To retrieve the information:

    .. code-block:: python

        # The static type of some_str is str.
        some_str = stash[some_str_key]
        # The static type of some_bool is bool.
        some_bool = stash[some_bool_key]

    .. versionadded:: 7.0
    """

    __slots__ = ("_storage",)

    def __init__(self) -> None:
        # The key's StashKey[T] parameter carries the static value type;
        # at runtime everything is stored as plain objects.
        self._storage: dict[StashKey[Any], object] = {}

    def __setitem__(self, key: StashKey[T], value: T) -> None:
        """Set a value for key."""
        self._storage[key] = value

    def __getitem__(self, key: StashKey[T]) -> T:
        """Get the value for key.

        Raises ``KeyError`` if the key wasn't set before.
        """
        return cast(T, self._storage[key])

    def __delitem__(self, key: StashKey[T]) -> None:
        """Delete the value for key.

        Raises ``KeyError`` if the key wasn't set before.
        """
        del self._storage[key]

    def __contains__(self, key: StashKey[T]) -> bool:
        """Return whether key was set."""
        return key in self._storage

    def __len__(self) -> int:
        """Return how many items exist in the stash."""
        return len(self._storage)

    def get(self, key: StashKey[T], default: D) -> T | D:
        """Get the value for key, or return default if the key wasn't set
        before."""
        if key in self._storage:
            return self[key]
        return default

    def setdefault(self, key: StashKey[T], default: T) -> T:
        """Return the value of key if already set, otherwise set the value
        of key to default and return default."""
        if key not in self._storage:
            self._storage[key] = default
        return self[key]
Stash is a type-safe heterogeneous mutable mapping that
allows keys and value types to be defined separately from
where it (the Stash) is created.
Usually you will be given an object which has a Stash, for example
:class:`~pytest.Config` or a :class:`~_pytest.nodes.Node`:
stash: Stash = some_object.stash
If a module or plugin wants to store data in this Stash, it creates
StashKey\s for its keys (at the module level):
# At the top-level of the module
some_str_key = StashKey[str]()
some_bool_key = StashKey[bool]()
To store information:
# Value type must match the key.
stash[some_str_key] = "value"
stash[some_bool_key] = True
To retrieve the information:
# The static type of some_str is str.
some_str = stash[some_str_key]
# The static type of some_bool is bool.
some_bool = stash[some_bool_key]
New in version 7.0.
87 def get(self, key: StashKey[T], default: D) -> T | D: 88 """Get the value for key, or return default if the key wasn't set 89 before.""" 90 try: 91 return self[key] 92 except KeyError: 93 return default
Get the value for key, or return default if the key wasn't set before.
95 def setdefault(self, key: StashKey[T], default: T) -> T: 96 """Return the value of key if already set, otherwise set the value 97 of key to default and return default.""" 98 try: 99 return self[key] 100 except KeyError: 101 self[key] = default 102 return default
Return the value of key if already set, otherwise set the value of key to default and return default.
17class StashKey(Generic[T]): 18 """``StashKey`` is an object used as a key to a :class:`Stash`. 19 20 A ``StashKey`` is associated with the type ``T`` of the value of the key. 21 22 A ``StashKey`` is unique and cannot conflict with another key. 23 24 .. versionadded:: 7.0 25 """ 26 27 __slots__ = ()
73@dataclasses.dataclass(init=False) 74class SubtestReport(TestReport): 75 context: SubtestContext 76 77 @property 78 def head_line(self) -> str: 79 _, _, domain = self.location 80 return f"{domain} {self._sub_test_description()}" 81 82 def _sub_test_description(self) -> str: 83 parts = [] 84 if self.context.msg is not None: 85 parts.append(f"[{self.context.msg}]") 86 if self.context.kwargs: 87 params_desc = ", ".join( 88 f"{k}={saferepr(v)}" for (k, v) in self.context.kwargs.items() 89 ) 90 parts.append(f"({params_desc})") 91 return " ".join(parts) or "(<subtest>)" 92 93 def _to_json(self) -> dict[str, Any]: 94 data = super()._to_json() 95 del data["context"] 96 data["_report_type"] = "SubTestReport" 97 data["_subtest.context"] = self.context._to_json() 98 return data 99 100 @classmethod 101 def _from_json(cls, reportdict: dict[str, Any]) -> SubtestReport: 102 report = super()._from_json(reportdict) 103 report.context = SubtestContext._from_json(reportdict["_subtest.context"]) 104 return report 105 106 @classmethod 107 def _new( 108 cls, 109 test_report: TestReport, 110 context: SubtestContext, 111 captured_output: Captured | None, 112 captured_logs: CapturedLogs | None, 113 ) -> Self: 114 result = super()._from_json(test_report._to_json()) 115 result.context = context 116 117 if captured_output: 118 if captured_output.out: 119 result.sections.append(("Captured stdout call", captured_output.out)) 120 if captured_output.err: 121 result.sections.append(("Captured stderr call", captured_output.err)) 122 123 if captured_logs and (log := captured_logs.handler.stream.getvalue()): 124 result.sections.append(("Captured log call", log)) 125 126 return result
77 @property 78 def head_line(self) -> str: 79 _, _, domain = self.location 80 return f"{domain} {self._sub_test_description()}"
Experimental The head line shown with longrepr output for this report, more commonly during traceback representation during failures::
________ Test.foo ________
In the example above, the head_line is "Test.foo".
This function is considered experimental, so beware that it is subject to changes even in patch releases.
139class Subtests: 140 """Subtests fixture, enables declaring subtests inside test functions via the :meth:`test` method.""" 141 142 def __init__( 143 self, 144 ihook: pluggy.HookRelay, 145 suspend_capture_ctx: Callable[[], AbstractContextManager[None]], 146 request: SubRequest, 147 *, 148 _ispytest: bool = False, 149 ) -> None: 150 check_ispytest(_ispytest) 151 self._ihook = ihook 152 self._suspend_capture_ctx = suspend_capture_ctx 153 self._request = request 154 155 def test( 156 self, 157 msg: str | None = None, 158 **kwargs: Any, 159 ) -> _SubTestContextManager: 160 """ 161 Context manager for subtests, capturing exceptions raised inside the subtest scope and 162 reporting assertion failures and errors individually. 163 164 Usage 165 ----- 166 167 .. code-block:: python 168 169 def test(subtests): 170 for i in range(5): 171 with subtests.test("custom message", i=i): 172 assert i % 2 == 0 173 174 :param msg: 175 If given, the message will be shown in the test report in case of subtest failure. 176 177 :param kwargs: 178 Arbitrary values that are also added to the subtest report. 179 """ 180 return _SubTestContextManager( 181 self._ihook, 182 msg, 183 kwargs, 184 request=self._request, 185 suspend_capture_ctx=self._suspend_capture_ctx, 186 config=self._request.config, 187 )
Subtests fixture, enables declaring subtests inside test functions via the test() method.
142 def __init__( 143 self, 144 ihook: pluggy.HookRelay, 145 suspend_capture_ctx: Callable[[], AbstractContextManager[None]], 146 request: SubRequest, 147 *, 148 _ispytest: bool = False, 149 ) -> None: 150 check_ispytest(_ispytest) 151 self._ihook = ihook 152 self._suspend_capture_ctx = suspend_capture_ctx 153 self._request = request
155 def test( 156 self, 157 msg: str | None = None, 158 **kwargs: Any, 159 ) -> _SubTestContextManager: 160 """ 161 Context manager for subtests, capturing exceptions raised inside the subtest scope and 162 reporting assertion failures and errors individually. 163 164 Usage 165 ----- 166 167 .. code-block:: python 168 169 def test(subtests): 170 for i in range(5): 171 with subtests.test("custom message", i=i): 172 assert i % 2 == 0 173 174 :param msg: 175 If given, the message will be shown in the test report in case of subtest failure. 176 177 :param kwargs: 178 Arbitrary values that are also added to the subtest report. 179 """ 180 return _SubTestContextManager( 181 self._ihook, 182 msg, 183 kwargs, 184 request=self._request, 185 suspend_capture_ctx=self._suspend_capture_ctx, 186 config=self._request.config, 187 )
Context manager for subtests, capturing exceptions raised inside the subtest scope and reporting assertion failures and errors individually.
Usage
def test(subtests):
for i in range(5):
with subtests.test("custom message", i=i):
assert i % 2 == 0
Parameters
msg: If given, the message will be shown in the test report in case of subtest failure.
kwargs: Arbitrary values that are also added to the subtest report.
@final
@dataclasses.dataclass
class TempPathFactory:
    """Factory for temporary directories under the common base temp directory,
    as discussed at :ref:`temporary directory location and retention`.
    """

    _given_basetemp: Path | None
    # pluggy TagTracerSub, not currently exposed, so Any.
    _trace: Any
    _basetemp: Path | None
    _retention_count: int
    _retention_policy: RetentionType

    def __init__(
        self,
        given_basetemp: Path | None,
        retention_count: int,
        retention_policy: RetentionType,
        trace,
        basetemp: Path | None = None,
        *,
        _ispytest: bool = False,
    ) -> None:
        """:meta private:"""
        check_ispytest(_ispytest)
        if given_basetemp is None:
            self._given_basetemp = None
        else:
            # Use os.path.abspath() to get absolute path instead of resolve() as it
            # does not work the same in all platforms (see #4427).
            # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012).
            self._given_basetemp = Path(os.path.abspath(str(given_basetemp)))
        self._trace = trace
        self._retention_count = retention_count
        self._retention_policy = retention_policy
        self._basetemp = basetemp

    @classmethod
    def from_config(
        cls,
        config: Config,
        *,
        _ispytest: bool = False,
    ) -> TempPathFactory:
        """Create a factory according to pytest configuration.

        :meta private:
        """
        check_ispytest(_ispytest)
        count = int(config.getini("tmp_path_retention_count"))
        if count < 0:
            raise ValueError(
                f"tmp_path_retention_count must be >= 0. Current input: {count}."
            )

        policy = config.getini("tmp_path_retention_policy")
        if policy not in ("all", "failed", "none"):
            raise ValueError(
                f"tmp_path_retention_policy must be either all, failed, none. Current input: {policy}."
            )

        return cls(
            given_basetemp=config.option.basetemp,
            trace=config.trace.get("tmpdir"),
            retention_count=count,
            retention_policy=policy,
            _ispytest=True,
        )

    def _ensure_relative_to_basetemp(self, basename: str) -> str:
        """Normalize ``basename`` and reject values that would escape the
        base temp directory (e.g. absolute paths or ``..`` components)."""
        basename = os.path.normpath(basename)
        if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp():
            raise ValueError(f"{basename} is not a normalized and relative path")
        return basename

    def mktemp(self, basename: str, numbered: bool = True) -> Path:
        """Create a new temporary directory managed by the factory.

        :param basename:
            Directory base name, must be a relative path.

        :param numbered:
            If ``True``, ensure the directory is unique by adding a numbered
            suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True``
            means that this function will create directories named ``"foo-0"``,
            ``"foo-1"``, ``"foo-2"`` and so on.

        :returns:
            The path to the new directory.
        """
        basename = self._ensure_relative_to_basetemp(basename)
        if not numbered:
            # mode 0o700: keep temp dirs private to the current user.
            p = self.getbasetemp().joinpath(basename)
            p.mkdir(mode=0o700)
        else:
            p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700)
        self._trace("mktemp", p)
        return p

    def getbasetemp(self) -> Path:
        """Return the base temporary directory, creating it if needed.

        :returns:
            The base temporary directory.
        """
        # Computed at most once per factory; cached thereafter.
        if self._basetemp is not None:
            return self._basetemp

        if self._given_basetemp is not None:
            # An explicit --basetemp is wiped and recreated on every run.
            basetemp = self._given_basetemp
            if basetemp.exists():
                rm_rf(basetemp)
            basetemp.mkdir(mode=0o700)
            basetemp = basetemp.resolve()
        else:
            from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
            temproot = Path(from_env or tempfile.gettempdir()).resolve()
            user = get_user() or "unknown"
            # use a sub-directory in the temproot to speed-up
            # make_numbered_dir() call
            rootdir = temproot.joinpath(f"pytest-of-{user}")
            try:
                rootdir.mkdir(mode=0o700, exist_ok=True)
            except OSError:
                # getuser() likely returned illegal characters for the platform, use unknown back off mechanism
                rootdir = temproot.joinpath("pytest-of-unknown")
                rootdir.mkdir(mode=0o700, exist_ok=True)
            # Because we use exist_ok=True with a predictable name, make sure
            # we are the owners, to prevent any funny business (on unix, where
            # temproot is usually shared).
            # Also, to keep things private, fixup any world-readable temp
            # rootdir's permissions. Historically 0o755 was used, so we can't
            # just error out on this, at least for a while.
            # Don't follow symlinks, otherwise we're open to symlink-swapping
            # TOCTOU vulnerability.
            # This check makes us vulnerable to a DoS - a user can `mkdir
            # /tmp/pytest-of-otheruser` and then `otheruser` will fail this
            # check. For now we don't consider it a real problem. otheruser can
            # change their TMPDIR or --basetemp, and maybe give the prankster a
            # good scolding.
            uid = get_user_id()
            if uid is not None:
                stat_follow_symlinks = (
                    False if os.stat in os.supports_follow_symlinks else True
                )
                rootdir_stat = rootdir.stat(follow_symlinks=stat_follow_symlinks)
                if stat.S_ISLNK(rootdir_stat.st_mode):
                    raise OSError(
                        f"The temporary directory {rootdir} is a symbolic link. "
                        "Fix this and try again."
                    )
                if rootdir_stat.st_uid != uid:
                    raise OSError(
                        f"The temporary directory {rootdir} is not owned by the current user. "
                        "Fix this and try again."
                    )
                if (rootdir_stat.st_mode & 0o077) != 0:
                    chmod_follow_symlinks = (
                        False if os.chmod in os.supports_follow_symlinks else True
                    )
                    rootdir.chmod(
                        rootdir_stat.st_mode & ~0o077,
                        follow_symlinks=chmod_follow_symlinks,
                    )
            # "none" retention means no runs are kept at all.
            keep = self._retention_count
            if self._retention_policy == "none":
                keep = 0
            basetemp = make_numbered_dir_with_cleanup(
                prefix="pytest-",
                root=rootdir,
                keep=keep,
                lock_timeout=LOCK_TIMEOUT,
                mode=0o700,
            )
        assert basetemp is not None, basetemp
        self._basetemp = basetemp
        self._trace("new basetemp", basetemp)
        return basetemp
Factory for temporary directories under the common base temp directory,
as discussed at :ref:`temporary directory location and retention`.
56 def __init__( 57 self, 58 given_basetemp: Path | None, 59 retention_count: int, 60 retention_policy: RetentionType, 61 trace, 62 basetemp: Path | None = None, 63 *, 64 _ispytest: bool = False, 65 ) -> None: 66 check_ispytest(_ispytest) 67 if given_basetemp is None: 68 self._given_basetemp = None 69 else: 70 # Use os.path.abspath() to get absolute path instead of resolve() as it 71 # does not work the same in all platforms (see #4427). 72 # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012). 73 self._given_basetemp = Path(os.path.abspath(str(given_basetemp))) 74 self._trace = trace 75 self._retention_count = retention_count 76 self._retention_policy = retention_policy 77 self._basetemp = basetemp
79 @classmethod 80 def from_config( 81 cls, 82 config: Config, 83 *, 84 _ispytest: bool = False, 85 ) -> TempPathFactory: 86 """Create a factory according to pytest configuration. 87 88 :meta private: 89 """ 90 check_ispytest(_ispytest) 91 count = int(config.getini("tmp_path_retention_count")) 92 if count < 0: 93 raise ValueError( 94 f"tmp_path_retention_count must be >= 0. Current input: {count}." 95 ) 96 97 policy = config.getini("tmp_path_retention_policy") 98 if policy not in ("all", "failed", "none"): 99 raise ValueError( 100 f"tmp_path_retention_policy must be either all, failed, none. Current input: {policy}." 101 ) 102 103 return cls( 104 given_basetemp=config.option.basetemp, 105 trace=config.trace.get("tmpdir"), 106 retention_count=count, 107 retention_policy=policy, 108 _ispytest=True, 109 )
Create a factory according to pytest configuration.
:meta private:
117 def mktemp(self, basename: str, numbered: bool = True) -> Path: 118 """Create a new temporary directory managed by the factory. 119 120 :param basename: 121 Directory base name, must be a relative path. 122 123 :param numbered: 124 If ``True``, ensure the directory is unique by adding a numbered 125 suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True`` 126 means that this function will create directories named ``"foo-0"``, 127 ``"foo-1"``, ``"foo-2"`` and so on. 128 129 :returns: 130 The path to the new directory. 131 """ 132 basename = self._ensure_relative_to_basetemp(basename) 133 if not numbered: 134 p = self.getbasetemp().joinpath(basename) 135 p.mkdir(mode=0o700) 136 else: 137 p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700) 138 self._trace("mktemp", p) 139 return p
Create a new temporary directory managed by the factory.
Parameters
basename: Directory base name, must be a relative path.
numbered: If ``True``, ensure the directory is unique by adding a numbered suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True`` means that this function will create directories named ``"foo-0"``, ``"foo-1"``, ``"foo-2"`` and so on.
:returns: The path to the new directory.
141 def getbasetemp(self) -> Path: 142 """Return the base temporary directory, creating it if needed. 143 144 :returns: 145 The base temporary directory. 146 """ 147 if self._basetemp is not None: 148 return self._basetemp 149 150 if self._given_basetemp is not None: 151 basetemp = self._given_basetemp 152 if basetemp.exists(): 153 rm_rf(basetemp) 154 basetemp.mkdir(mode=0o700) 155 basetemp = basetemp.resolve() 156 else: 157 from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") 158 temproot = Path(from_env or tempfile.gettempdir()).resolve() 159 user = get_user() or "unknown" 160 # use a sub-directory in the temproot to speed-up 161 # make_numbered_dir() call 162 rootdir = temproot.joinpath(f"pytest-of-{user}") 163 try: 164 rootdir.mkdir(mode=0o700, exist_ok=True) 165 except OSError: 166 # getuser() likely returned illegal characters for the platform, use unknown back off mechanism 167 rootdir = temproot.joinpath("pytest-of-unknown") 168 rootdir.mkdir(mode=0o700, exist_ok=True) 169 # Because we use exist_ok=True with a predictable name, make sure 170 # we are the owners, to prevent any funny business (on unix, where 171 # temproot is usually shared). 172 # Also, to keep things private, fixup any world-readable temp 173 # rootdir's permissions. Historically 0o755 was used, so we can't 174 # just error out on this, at least for a while. 175 # Don't follow symlinks, otherwise we're open to symlink-swapping 176 # TOCTOU vulnerability. 177 # This check makes us vulnerable to a DoS - a user can `mkdir 178 # /tmp/pytest-of-otheruser` and then `otheruser` will fail this 179 # check. For now we don't consider it a real problem. otheruser can 180 # change their TMPDIR or --basetemp, and maybe give the prankster a 181 # good scolding. 
182 uid = get_user_id() 183 if uid is not None: 184 stat_follow_symlinks = ( 185 False if os.stat in os.supports_follow_symlinks else True 186 ) 187 rootdir_stat = rootdir.stat(follow_symlinks=stat_follow_symlinks) 188 if stat.S_ISLNK(rootdir_stat.st_mode): 189 raise OSError( 190 f"The temporary directory {rootdir} is a symbolic link. " 191 "Fix this and try again." 192 ) 193 if rootdir_stat.st_uid != uid: 194 raise OSError( 195 f"The temporary directory {rootdir} is not owned by the current user. " 196 "Fix this and try again." 197 ) 198 if (rootdir_stat.st_mode & 0o077) != 0: 199 chmod_follow_symlinks = ( 200 False if os.chmod in os.supports_follow_symlinks else True 201 ) 202 rootdir.chmod( 203 rootdir_stat.st_mode & ~0o077, 204 follow_symlinks=chmod_follow_symlinks, 205 ) 206 keep = self._retention_count 207 if self._retention_policy == "none": 208 keep = 0 209 basetemp = make_numbered_dir_with_cleanup( 210 prefix="pytest-", 211 root=rootdir, 212 keep=keep, 213 lock_timeout=LOCK_TIMEOUT, 214 mode=0o700, 215 ) 216 assert basetemp is not None, basetemp 217 self._basetemp = basetemp 218 self._trace("new basetemp", basetemp) 219 return basetemp
Return the base temporary directory, creating it if needed.
:returns: The base temporary directory.
267@final 268@dataclasses.dataclass 269class TempdirFactory: 270 """Backward compatibility wrapper that implements ``py.path.local`` 271 for :class:`TempPathFactory`. 272 273 .. note:: 274 These days, it is preferred to use ``tmp_path_factory``. 275 276 :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`. 277 278 """ 279 280 _tmppath_factory: TempPathFactory 281 282 def __init__( 283 self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False 284 ) -> None: 285 check_ispytest(_ispytest) 286 self._tmppath_factory = tmppath_factory 287 288 def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH: 289 """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object.""" 290 return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve()) 291 292 def getbasetemp(self) -> LEGACY_PATH: 293 """Same as :meth:`TempPathFactory.getbasetemp`, but returns a ``py.path.local`` object.""" 294 return legacy_path(self._tmppath_factory.getbasetemp().resolve())
Backward compatibility wrapper that implements py.path.local
for TempPathFactory.
These days, it is preferred to use tmp_path_factory.
:ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
288 def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH: 289 """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object.""" 290 return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve())
Same as TempPathFactory.mktemp(), but returns a py.path.local object.
292 def getbasetemp(self) -> LEGACY_PATH: 293 """Same as :meth:`TempPathFactory.getbasetemp`, but returns a ``py.path.local`` object.""" 294 return legacy_path(self._tmppath_factory.getbasetemp().resolve())
Same as TempPathFactory.getbasetemp(), but returns a py.path.local object.
378@final 379class TerminalReporter: 380 def __init__(self, config: Config, file: TextIO | None = None) -> None: 381 import _pytest.config 382 383 self.config = config 384 self._numcollected = 0 385 self._session: Session | None = None 386 self._showfspath: bool | None = None 387 388 self.stats: dict[str, list[Any]] = {} 389 self._main_color: str | None = None 390 self._known_types: list[str] | None = None 391 self.startpath = config.invocation_params.dir 392 if file is None: 393 file = sys.stdout 394 self._tw = _pytest.config.create_terminal_writer(config, file) 395 self._screen_width = self._tw.fullwidth 396 self.currentfspath: None | Path | str | int = None 397 self.reportchars = getreportopt(config) 398 self.foldskipped = config.option.fold_skipped 399 self.hasmarkup = self._tw.hasmarkup 400 # isatty should be a method but was wrongly implemented as a boolean. 401 # We use CallableBool here to support both. 402 self.isatty = compat.CallableBool(file.isatty()) 403 self._progress_nodeids_reported: set[str] = set() 404 self._timing_nodeids_reported: set[str] = set() 405 self._show_progress_info = self._determine_show_progress_info() 406 self._collect_report_last_write = timing.Instant() 407 self._already_displayed_warnings: int | None = None 408 self._keyboardinterrupt_memo: ExceptionRepr | None = None 409 410 def _determine_show_progress_info( 411 self, 412 ) -> Literal["progress", "count", "times", False]: 413 """Return whether we should display progress information based on the current config.""" 414 # do not show progress if we are not capturing output (#3038) unless explicitly 415 # overridden by progress-even-when-capture-no 416 if ( 417 self.config.getoption("capture", "no") == "no" 418 and self.config.getini("console_output_style") 419 != "progress-even-when-capture-no" 420 ): 421 return False 422 # do not show progress if we are showing fixture setup/teardown 423 if self.config.getoption("setupshow", False): 424 return False 425 cfg: str = 
# --- TerminalReporter methods (class body continues; the class header and the
# --- start of _determine_show_progress_info lie outside this chunk) ---

        # NOTE(review): fragment — the beginning of this method is outside this
        # chunk; `cfg` is bound there. Maps console_output_style to the internal
        # progress mode, defaulting to no progress display.
        self.config.getini("console_output_style")
        if cfg in {"progress", "progress-even-when-capture-no"}:
            return "progress"
        elif cfg == "count":
            return "count"
        elif cfg == "times":
            return "times"
        else:
            return False

    @property
    def verbosity(self) -> int:
        # Global -v/-q verbosity from the parsed command line.
        verbosity: int = self.config.option.verbose
        return verbosity

    @property
    def showheader(self) -> bool:
        # The session header is suppressed under -q (verbosity < 0).
        return self.verbosity >= 0

    @property
    def no_header(self) -> bool:
        # --no-header flag.
        return bool(self.config.option.no_header)

    @property
    def no_summary(self) -> bool:
        # --no-summary flag.
        return bool(self.config.option.no_summary)

    @property
    def showfspath(self) -> bool:
        # Whether to print the file path before per-file results; an explicit
        # setter value (below) overrides the verbosity-derived default.
        if self._showfspath is None:
            return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) >= 0
        return self._showfspath

    @showfspath.setter
    def showfspath(self, value: bool | None) -> None:
        # None restores the verbosity-derived default.
        self._showfspath = value

    @property
    def showlongtestinfo(self) -> bool:
        # One line per test (node id) instead of per-file dots, under -v.
        return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0

    @property
    def reported_progress(self) -> int:
        """The amount of items reported in the progress so far.

        :meta private:
        """
        return len(self._progress_nodeids_reported)

    def hasopt(self, char: str) -> bool:
        """Return True if reporting character *char* (or its alias) was requested via -r."""
        char = {"xfailed": "x", "skipped": "s"}.get(char, char)
        return char in self.reportchars

    def write_fspath_result(self, nodeid: str, res: str, **markup: bool) -> None:
        """Write result *res* for *nodeid*, starting a new line with the
        relative file path whenever the current file changes."""
        fspath = self.config.rootpath / nodeid.split("::")[0]
        if self.currentfspath is None or fspath != self.currentfspath:
            # Finish the previous file's progress column before switching files.
            if self.currentfspath is not None and self._show_progress_info:
                self._write_progress_information_filling_space()
            self.currentfspath = fspath
            relfspath = bestrelpath(self.startpath, fspath)
            self._tw.line()
            self._tw.write(relfspath + " ")
        self._tw.write(res, flush=True, **markup)

    def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None:
        """Write *prefix* on its own line (once per distinct prefix), then *extra*."""
        if self.currentfspath != prefix:
            self._tw.line()
            self.currentfspath = prefix
            self._tw.write(prefix)
        if extra:
            self._tw.write(extra, **kwargs)
            # Sentinel: something follows the prefix on this line.
            self.currentfspath = -2

    def ensure_newline(self) -> None:
        # Terminate any partially-written line before block output.
        if self.currentfspath:
            self._tw.line()
            self.currentfspath = None

    def wrap_write(
        self,
        content: str,
        *,
        flush: bool = False,
        margin: int = 8,
        line_sep: str = "\n",
        **markup: bool,
    ) -> None:
        """Wrap message with margin for progress info."""
        # Pad with the current line width so wrapping accounts for text already
        # on the line, then strip the padding from the first wrapped line.
        width_of_current_line = self._tw.width_of_current_line
        wrapped = line_sep.join(
            textwrap.wrap(
                " " * width_of_current_line + content,
                width=self._screen_width - margin,
                drop_whitespace=True,
                replace_whitespace=False,
            ),
        )
        wrapped = wrapped[width_of_current_line:]
        self._tw.write(wrapped, flush=flush, **markup)

    def write(self, content: str, *, flush: bool = False, **markup: bool) -> None:
        self._tw.write(content, flush=flush, **markup)

    def write_raw(self, content: str, *, flush: bool = False) -> None:
        self._tw.write_raw(content, flush=flush)

    def flush(self) -> None:
        self._tw.flush()

    def write_line(self, line: str | bytes, **markup: bool) -> None:
        """Write *line* on a fresh line, decoding bytes leniently."""
        if not isinstance(line, str):
            line = str(line, errors="replace")
        self.ensure_newline()
        self._tw.line(line, **markup)

    def rewrite(self, line: str, **markup: bool) -> None:
        """Rewinds the terminal cursor to the beginning and writes the given line.

        :param erase:
            If True, will also add spaces until the full terminal width to ensure
            previous lines are properly erased.

        The rest of the keyword arguments are markup instructions.
        """
        erase = markup.pop("erase", False)
        if erase:
            fill_count = self._tw.fullwidth - len(line) - 1
            fill = " " * fill_count
        else:
            fill = ""
        line = str(line)
        self._tw.write("\r" + line + fill, **markup)

    def write_sep(
        self,
        sep: str,
        title: str | None = None,
        fullwidth: int | None = None,
        **markup: bool,
    ) -> None:
        """Write a full-width separator line made of *sep*, optionally titled."""
        self.ensure_newline()
        self._tw.sep(sep, title, fullwidth, **markup)

    def section(self, title: str, sep: str = "=", **kw: bool) -> None:
        self._tw.sep(sep, title, **kw)

    def line(self, msg: str, **kw: bool) -> None:
        self._tw.line(msg, **kw)

    def _add_stats(self, category: str, items: Sequence[Any]) -> None:
        # Record reports under *category*; recompute the session color the
        # first time a category appears.
        set_main_color = category not in self.stats
        self.stats.setdefault(category, []).extend(items)
        if set_main_color:
            self._set_main_color()

    def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool:
        for line in str(excrepr).split("\n"):
            self.write_line("INTERNALERROR> " + line)
        return True

    def pytest_warning_recorded(
        self,
        warning_message: warnings.WarningMessage,
        nodeid: str,
    ) -> None:
        """Collect a recorded warning for the end-of-session warnings summary."""
        from _pytest.warnings import warning_record_to_str

        fslocation = warning_message.filename, warning_message.lineno
        message = warning_record_to_str(warning_message)

        warning_report = WarningReport(
            fslocation=fslocation, message=message, nodeid=nodeid
        )
        self._add_stats("warnings", [warning_report])

    def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
        if self.config.option.traceconfig:
            msg = f"PLUGIN registered: {plugin}"
            # XXX This event may happen during setup/teardown time
            # which unfortunately captures our output here
            # which garbles our output if we use self.write_line.
            self.write_line(msg)

    def pytest_deselected(self, items: Sequence[Item]) -> None:
        self._add_stats("deselected", items)

    def pytest_runtest_logstart(
        self, nodeid: str, location: tuple[str, int | None, str]
    ) -> None:
        fspath, lineno, domain = location
        # Ensure that the path is printed before the
        # 1st test of a module starts running.
        if self.showlongtestinfo:
            line = self._locationline(nodeid, fspath, lineno, domain)
            self.write_ensure_prefix(line, "")
            self.flush()
        elif self.showfspath:
            self.write_fspath_result(nodeid, "")
            self.flush()

    def pytest_runtest_logreport(self, report: TestReport) -> None:
        """Render one test phase's outcome: a status letter in quiet mode, a
        full per-test line (with skip/xfail reason) in verbose mode."""
        self._tests_ran = True
        rep = report

        res = TestShortLogReport(
            *self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
        )
        category, letter, word = res.category, res.letter, res.word
        if not isinstance(word, tuple):
            markup = None
        else:
            word, markup = word
        self._add_stats(category, [rep])
        if not letter and not word:
            # Probably passed setup/teardown.
            return
        if markup is None:
            # Default outcome coloring when the status hook supplied none.
            was_xfail = hasattr(report, "wasxfail")
            if rep.passed and not was_xfail:
                markup = {"green": True}
            elif rep.passed and was_xfail:
                markup = {"yellow": True}
            elif rep.failed:
                markup = {"red": True}
            elif rep.skipped:
                markup = {"yellow": True}
            else:
                markup = {}
        self._progress_nodeids_reported.add(rep.nodeid)
        if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0:
            self._tw.write(letter, **markup)
            # When running in xdist, the logreport and logfinish of multiple
            # items are interspersed, e.g. `logreport`, `logreport`,
            # `logfinish`, `logfinish`. To avoid the "past edge" calculation
            # from getting confused and overflowing (#7166), do the past edge
            # printing here and not in logfinish, except for the 100% which
            # should only be printed after all teardowns are finished.
            if self._show_progress_info and not self._is_last_item:
                self._write_progress_information_if_past_edge()
        else:
            line = self._locationline(rep.nodeid, *rep.location)
            running_xdist = hasattr(rep, "node")
            if not running_xdist:
                self.write_ensure_prefix(line, word, **markup)
                if rep.skipped or hasattr(report, "wasxfail"):
                    reason = _get_raw_skip_reason(rep)
                    if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2:
                        # Trim the reason so it fits before the progress column.
                        available_width = (
                            (self._tw.fullwidth - self._tw.width_of_current_line)
                            - len(" [100%]")
                            - 1
                        )
                        formatted_reason = _format_trimmed(
                            " ({})", reason, available_width
                        )
                    else:
                        formatted_reason = f" ({reason})"

                    if reason and formatted_reason is not None:
                        self.wrap_write(formatted_reason)
                if self._show_progress_info:
                    self._write_progress_information_filling_space()
            else:
                # xdist worker output: prefix with the worker gateway id.
                self.ensure_newline()
                self._tw.write(f"[{rep.node.gateway.id}]")
                if self._show_progress_info:
                    self._tw.write(
                        self._get_progress_information_message() + " ", cyan=True
                    )
                else:
                    self._tw.write(" ")
                self._tw.write(word, **markup)
                self._tw.write(" " + line)
                self.currentfspath = -2
        self.flush()

    @property
    def _is_last_item(self) -> bool:
        assert self._session is not None
        return self.reported_progress == self._session.testscollected

    @hookimpl(wrapper=True)
    def pytest_runtestloop(self) -> Generator[None, object, object]:
        result = yield

        # Write the final/100% progress -- deferred until the loop is complete.
        if (
            self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0
            and self._show_progress_info
            and self.reported_progress
        ):
            self._write_progress_information_filling_space()

        return result

    def _get_progress_information_message(self) -> str:
        """Build the right-hand progress column for the current mode
        ("count", "times", or percentage)."""
        assert self._session
        collected = self._session.testscollected
        if self._show_progress_info == "count":
            if collected:
                progress = self.reported_progress
                # Pad the counter to the width of the total so the column is stable.
                counter_format = f"{{:{len(str(collected))}d}}"
                format_string = f" [{counter_format}/{{}}]"
                return format_string.format(progress, collected)
            return f" [ {collected} / {collected} ]"
        if self._show_progress_info == "times":
            if not collected:
                return ""
            all_reports = (
                self._get_reports_to_display("passed")
                + self._get_reports_to_display("xpassed")
                + self._get_reports_to_display("failed")
                + self._get_reports_to_display("xfailed")
                + self._get_reports_to_display("skipped")
                + self._get_reports_to_display("error")
                + self._get_reports_to_display("")
            )
            current_location = all_reports[-1].location[0]
            not_reported = [
                r for r in all_reports if r.nodeid not in self._timing_nodeids_reported
            ]
            tests_in_module = sum(
                i.location[0] == current_location for i in self._session.items
            )
            # Each test contributes exactly one "setup"-phase report, so this
            # counts tests finished in the current module.
            tests_completed = sum(
                r.when == "setup"
                for r in not_reported
                if r.location[0] == current_location
            )
            last_in_module = tests_completed == tests_in_module
            if self.showlongtestinfo or last_in_module:
                self._timing_nodeids_reported.update(r.nodeid for r in not_reported)
                return format_node_duration(
                    sum(r.duration for r in not_reported if isinstance(r, TestReport))
                )
            return ""
        if collected:
            return f" [{self.reported_progress * 100 // collected:3d}%]"
        return " [100%]"

    def _write_progress_information_if_past_edge(self) -> None:
        # Break the line early if the upcoming progress column would no longer fit.
        w = self._width_of_current_line
        if self._show_progress_info == "count":
            assert self._session
            num_tests = self._session.testscollected
            progress_length = len(f" [{num_tests}/{num_tests}]")
        elif self._show_progress_info == "times":
            progress_length = len(" 99h 59m")
        else:
            progress_length = len(" [100%]")
        past_edge = w + progress_length + 1 >= self._screen_width
        if past_edge:
            main_color, _ = self._get_main_color()
            msg = self._get_progress_information_message()
            self._tw.write(msg + "\n", **{main_color: True})

    def _write_progress_information_filling_space(self) -> None:
        # Right-align the progress message against the terminal edge.
        color, _ = self._get_main_color()
        msg = self._get_progress_information_message()
        w = self._width_of_current_line
        fill = self._tw.fullwidth - w - 1
        self.write(msg.rjust(fill), flush=True, **{color: True})

    @property
    def _width_of_current_line(self) -> int:
        """Return the width of the current line."""
        return self._tw.width_of_current_line

    def pytest_collection(self) -> None:
        # NOTE(review): indentation reconstructed from a whitespace-mangled dump.
        # The `elif` is attached to the isatty() check so that non-tty sessions
        # still announce collection at verbose >= 1; nested under the inner `if`
        # it would be unreachable (verbose >= 1 implies verbose >= 0) — confirm
        # against upstream.
        if self.isatty():
            if self.config.option.verbose >= 0:
                self.write("collecting ... ", flush=True, bold=True)
        elif self.config.option.verbose >= 1:
            self.write("collecting ... ", flush=True, bold=True)

    def pytest_collectreport(self, report: CollectReport) -> None:
        if report.failed:
            self._add_stats("error", [report])
        elif report.skipped:
            self._add_stats("skipped", [report])
        items = [x for x in report.result if isinstance(x, Item)]
        self._numcollected += len(items)
        if self.isatty():
            self.report_collect()

    def report_collect(self, final: bool = False) -> None:
        """Write the '(collecting|collected) N items ...' status line,
        rate-limited while collection is still in progress."""
        if self.config.option.verbose < 0:
            return

        if not final:
            # Only write the "collecting" report every `REPORT_COLLECTING_RESOLUTION`.
            if (
                self._collect_report_last_write.elapsed().seconds
                < REPORT_COLLECTING_RESOLUTION
            ):
                return
            self._collect_report_last_write = timing.Instant()

        errors = len(self.stats.get("error", []))
        skipped = len(self.stats.get("skipped", []))
        deselected = len(self.stats.get("deselected", []))
        selected = self._numcollected - deselected
        line = "collected " if final else "collecting "
        line += (
            str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
        )
        if errors:
            line += f" / {errors} error{'s' if errors != 1 else ''}"
        if deselected:
            line += f" / {deselected} deselected"
        if skipped:
            line += f" / {skipped} skipped"
        if self._numcollected > selected:
            line += f" / {selected} selected"
        if self.isatty():
            # Overwrite the previous status line in place on a tty.
            self.rewrite(line, bold=True, erase=True)
            if final:
                self.write("\n")
        else:
            self.write_line(line)

    @hookimpl(trylast=True)
    def pytest_sessionstart(self, session: Session) -> None:
        """Print the session header (platform, versions, plugin list)."""
        self._session = session
        self._session_start = timing.Instant()
        if not self.showheader:
            return
        self.write_sep("=", "test session starts", bold=True)
        verinfo = platform.python_version()
        if not self.no_header:
            msg = f"platform {sys.platform} -- Python {verinfo}"
            pypy_version_info = getattr(sys, "pypy_version_info", None)
            if pypy_version_info:
                verinfo = ".".join(map(str, pypy_version_info[:3]))
                msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]"
            msg += f", pytest-{_pytest._version.version}, pluggy-{pluggy.__version__}"
            if (
                self.verbosity > 0
                or self.config.option.debug
                or getattr(self.config.option, "pastebin", None)
            ):
                msg += " -- " + str(sys.executable)
            self.write_line(msg)
            lines = self.config.hook.pytest_report_header(
                config=self.config, start_path=self.startpath
            )
            self._write_report_lines_from_hooks(lines)

    def _write_report_lines_from_hooks(
        self, lines: Sequence[str | Sequence[str]]
    ) -> None:
        # Reversed so that the least specific hook result is printed first.
        for line_or_lines in reversed(lines):
            if isinstance(line_or_lines, str):
                self.write_line(line_or_lines)
            else:
                for line in line_or_lines:
                    self.write_line(line)

    def pytest_report_header(self, config: Config) -> list[str]:
        """Contribute rootdir/configfile/testpaths/plugins lines to the header."""
        result = [f"rootdir: {config.rootpath}"]

        if config.inipath:
            warning = ""
            if config._ignored_config_files:
                warning = f" (WARNING: ignoring pytest config in {', '.join(config._ignored_config_files)}!)"
            result.append(
                "configfile: " + bestrelpath(config.rootpath, config.inipath) + warning
            )

        if config.args_source == Config.ArgsSource.TESTPATHS:
            testpaths: list[str] = config.getini("testpaths")
            result.append("testpaths: {}".format(", ".join(testpaths)))

        plugininfo = config.pluginmanager.list_plugin_distinfo()
        if plugininfo:
            result.append(
                "plugins: {}".format(", ".join(_plugin_nameversions(plugininfo)))
            )
        return result

    def pytest_collection_finish(self, session: Session) -> None:
        self.report_collect(True)

        lines = self.config.hook.pytest_report_collectionfinish(
            config=self.config,
            start_path=self.startpath,
            items=session.items,
        )
        self._write_report_lines_from_hooks(lines)

        if self.config.getoption("collectonly"):
            if session.items:
                if self.config.option.verbose > -1:
                    self._tw.line("")
                self._printcollecteditems(session.items)

            failed = self.stats.get("failed")
            if failed:
                self._tw.sep("!", "collection failures")
                for rep in failed:
                    rep.toterminal(self._tw)

    def _printcollecteditems(self, items: Sequence[Item]) -> None:
        """Render the --collect-only listing: per-file counts at -qq, flat node
        ids at -q, and an indented collector tree otherwise."""
        test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES)
        if test_cases_verbosity < 0:
            if test_cases_verbosity < -1:
                counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
                for name, count in sorted(counts.items()):
                    self._tw.line(f"{name}: {count}")
            else:
                for item in items:
                    self._tw.line(item.nodeid)
            return
        stack: list[Node] = []
        indent = ""
        for item in items:
            needed_collectors = item.listchain()[1:]  # strip root node
            # Pop back to the deepest collector shared with the previous item.
            while stack:
                if stack == needed_collectors[: len(stack)]:
                    break
                stack.pop()
            for col in needed_collectors[len(stack) :]:
                stack.append(col)
                indent = (len(stack) - 1) * " "
                self._tw.line(f"{indent}{col}")
                if test_cases_verbosity >= 1:
                    obj = getattr(col, "obj", None)
                    doc = inspect.getdoc(obj) if obj else None
                    if doc:
                        for line in doc.splitlines():
                            self._tw.line("{}{}".format(indent + " ", line))

    @hookimpl(wrapper=True)
    def pytest_sessionfinish(
        self, session: Session, exitstatus: int | ExitCode
    ) -> Generator[None]:
        """After all other sessionfinish hooks ran, print summaries and the
        final stats line."""
        result = yield
        self._tw.line("")
        summary_exit_codes = (
            ExitCode.OK,
            ExitCode.TESTS_FAILED,
            ExitCode.INTERRUPTED,
            ExitCode.USAGE_ERROR,
            ExitCode.NO_TESTS_COLLECTED,
        )
        if exitstatus in summary_exit_codes and not self.no_summary:
            self.config.hook.pytest_terminal_summary(
                terminalreporter=self, exitstatus=exitstatus, config=self.config
            )
        if session.shouldfail:
            self.write_sep("!", str(session.shouldfail), red=True)
        if exitstatus == ExitCode.INTERRUPTED:
            self._report_keyboardinterrupt()
            self._keyboardinterrupt_memo = None
        elif session.shouldstop:
            self.write_sep("!", str(session.shouldstop), red=True)
        self.summary_stats()
        return result

    @hookimpl(wrapper=True)
    def pytest_terminal_summary(self) -> Generator[None]:
        # Our own summaries run before other plugins'; the short summary and
        # any teardown warnings run after.
        self.summary_errors()
        self.summary_failures()
        self.summary_xfailures()
        self.summary_warnings()
        self.summary_passes()
        self.summary_xpasses()
        try:
            return (yield)
        finally:
            self.short_test_summary()
            # Display any extra warnings from teardown here (if any).
            self.summary_warnings()

    def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None:
        # Memoize the repr now; it is printed at session finish / unconfigure.
        self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)

    def pytest_unconfigure(self) -> None:
        if self._keyboardinterrupt_memo is not None:
            self._report_keyboardinterrupt()

    def _report_keyboardinterrupt(self) -> None:
        excrepr = self._keyboardinterrupt_memo
        assert excrepr is not None
        assert excrepr.reprcrash is not None
        msg = excrepr.reprcrash.message
        self.write_sep("!", msg)
        if "KeyboardInterrupt" in msg:
            if self.config.option.fulltrace:
                excrepr.toterminal(self._tw)
            else:
                excrepr.reprcrash.toterminal(self._tw)
                self._tw.line(
                    "(to show a full traceback on KeyboardInterrupt use --full-trace)",
                    yellow=True,
                )

    def _locationline(
        self, nodeid: str, fspath: str, lineno: int | None, domain: str
    ) -> str:
        """Return the location prefix line for a test (cwd-relative node id,
        plus the real file when it differs from the node id's path)."""

        def mkrel(nodeid: str) -> str:
            line = self.config.cwd_relative_nodeid(nodeid)
            if domain and line.endswith(domain):
                line = line[: -len(domain)]
                values = domain.split("[")
                values[0] = values[0].replace(".", "::")  # don't replace '.' in params
                line += "[".join(values)
            return line

        # fspath comes from testid which has a "/"-normalized path.
        if fspath:
            res = mkrel(nodeid)
            if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
                "\\", nodes.SEP
            ):
                res += " <- " + bestrelpath(self.startpath, Path(fspath))
        else:
            res = "[location]"
        return res + " "

    def _getfailureheadline(self, rep):
        # Headline for a failure section; reports without one fall back to a
        # session-level label.
        head_line = rep.head_line
        if head_line:
            return head_line
        return "test session"  # XXX?

    def _getcrashline(self, rep):
        # Best-effort one-line crash description from the report's longrepr.
        try:
            return str(rep.longrepr.reprcrash)
        except AttributeError:
            try:
                return str(rep.longrepr)[:50]
            except AttributeError:
                return ""

    #
    # Summaries for sessionfinish.
    #
    def getreports(self, name: str):
        # Reports already shown in pdb are excluded from summaries.
        return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")]

    def summary_warnings(self) -> None:
        """Print the warnings summary (grouped by message), once for warnings
        captured during the run and once more for teardown-time warnings."""
        if self.hasopt("w"):
            all_warnings: list[WarningReport] | None = self.stats.get("warnings")
            if not all_warnings:
                return

            final = self._already_displayed_warnings is not None
            if final:
                # Second invocation: only warnings recorded since the first one.
                warning_reports = all_warnings[self._already_displayed_warnings :]
            else:
                warning_reports = all_warnings
            self._already_displayed_warnings = len(warning_reports)
            if not warning_reports:
                return

            reports_grouped_by_message: dict[str, list[WarningReport]] = {}
            for wr in warning_reports:
                reports_grouped_by_message.setdefault(wr.message, []).append(wr)

            def collapsed_location_report(reports: list[WarningReport]) -> str:
                locations = []
                for w in reports:
                    location = w.get_location(self.config)
                    if location:
                        locations.append(location)

                if len(locations) < 10:
                    return "\n".join(map(str, locations))

                # Many locations: collapse to per-file counts.
                counts_by_filename = Counter(
                    str(loc).split("::", 1)[0] for loc in locations
                )
                return "\n".join(
                    "{}: {} warning{}".format(k, v, "s" if v > 1 else "")
                    for k, v in counts_by_filename.items()
                )

            title = "warnings summary (final)" if final else "warnings summary"
            self.write_sep("=", title, yellow=True, bold=False)
            for message, message_reports in reports_grouped_by_message.items():
                maybe_location = collapsed_location_report(message_reports)
                if maybe_location:
                    self._tw.line(maybe_location)
                    lines = message.splitlines()
                    indented = "\n".join(" " + x for x in lines)
                    message = indented.rstrip()
                else:
                    message = message.rstrip()
                self._tw.line(message)
                self._tw.line()
            self._tw.line(
                "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html"
            )

    def summary_passes(self) -> None:
        self.summary_passes_combined("passed", "PASSES", "P")

    def summary_xpasses(self) -> None:
        self.summary_passes_combined("xpassed", "XPASSES", "X")

    def summary_passes_combined(
        self, which_reports: str, sep_title: str, needed_opt: str
    ) -> None:
        """Print captured sections of passing tests when requested via -r."""
        if self.config.option.tbstyle != "no":
            if self.hasopt(needed_opt):
                reports: list[TestReport] = self.getreports(which_reports)
                if not reports:
                    return
                self.write_sep("=", sep_title)
                for rep in reports:
                    if rep.sections:
                        msg = self._getfailureheadline(rep)
                        self.write_sep("_", msg, green=True, bold=True)
                        self._outrep_summary(rep)
                    self._handle_teardown_sections(rep.nodeid)

    def _get_teardown_reports(self, nodeid: str) -> list[TestReport]:
        # Setup/teardown reports live under the empty-string stats key.
        reports = self.getreports("")
        return [
            report
            for report in reports
            if report.when == "teardown" and report.nodeid == nodeid
        ]

    def _handle_teardown_sections(self, nodeid: str) -> None:
        for report in self._get_teardown_reports(nodeid):
            self.print_teardown_sections(report)

    def print_teardown_sections(self, rep: TestReport) -> None:
        showcapture = self.config.option.showcapture
        if showcapture == "no":
            return
        for secname, content in rep.sections:
            if showcapture != "all" and showcapture not in secname:
                continue
            if "teardown" in secname:
                self._tw.sep("-", secname)
                if content[-1:] == "\n":
                    content = content[:-1]
                self._tw.line(content)

    def summary_failures(self) -> None:
        style = self.config.option.tbstyle
        self.summary_failures_combined("failed", "FAILURES", style=style)

    def summary_xfailures(self) -> None:
        show_tb = self.config.option.xfail_tb
        style = self.config.option.tbstyle if show_tb else "no"
        self.summary_failures_combined("xfailed", "XFAILURES", style=style)

    def summary_failures_combined(
        self,
        which_reports: str,
        sep_title: str,
        *,
        style: str,
        needed_opt: str | None = None,
    ) -> None:
        """Print the failure (or xfailure) section in the configured tb style."""
        if style != "no":
            if not needed_opt or self.hasopt(needed_opt):
                reports: list[BaseReport] = self.getreports(which_reports)
                if not reports:
                    return
                self.write_sep("=", sep_title)
                if style == "line":
                    for rep in reports:
                        line = self._getcrashline(rep)
                        # NOTE(review): emitting the full report before the
                        # one-line crash summary is unusual for --tb=line
                        # (the other styles print it per-report below) —
                        # confirm this is intended.
                        self._outrep_summary(rep)
                        self.write_line(line)
                else:
                    for rep in reports:
                        msg = self._getfailureheadline(rep)
                        self.write_sep("_", msg, red=True, bold=True)
                        self._outrep_summary(rep)
                        self._handle_teardown_sections(rep.nodeid)

    def summary_errors(self) -> None:
        if self.config.option.tbstyle != "no":
            reports: list[BaseReport] = self.getreports("error")
            if not reports:
                return
            self.write_sep("=", "ERRORS")
            # NOTE(review): the loop iterates the raw stats list, not the
            # pdb-filtered `reports` computed above — confirm reports shown in
            # pdb should still appear here.
            for rep in self.stats["error"]:
                msg = self._getfailureheadline(rep)
                if rep.when == "collect":
                    msg = "ERROR collecting " + msg
                else:
                    msg = f"ERROR at {rep.when} of {msg}"
                self.write_sep("_", msg, red=True, bold=True)
                self._outrep_summary(rep)

    def _outrep_summary(self, rep: BaseReport) -> None:
        rep.toterminal(self._tw)
        showcapture = self.config.option.showcapture
        if showcapture == "no":
            return
        for secname, content in rep.sections:
            if showcapture != "all" and showcapture not in secname:
                continue
            self._tw.sep("-", secname)
            if content[-1:] == "\n":
                content = content[:-1]
            self._tw.line(content)

    def summary_stats(self) -> None:
        """Print the final '=== N passed, M failed in Xs ===' line.

        The width bookkeeping compensates for invisible ANSI markup characters
        so the separator still spans the full terminal width.
        """
        if self.verbosity < -1:
            return

        session_duration = self._session_start.elapsed()
        (parts, main_color) = self.build_summary_stats_line()
        line_parts = []

        display_sep = self.verbosity >= 0
        if display_sep:
            fullwidth = self._tw.fullwidth
        for text, markup in parts:
            with_markup = self._tw.markup(text, **markup)
            if display_sep:
                fullwidth += len(with_markup) - len(text)
            line_parts.append(with_markup)
        msg = ", ".join(line_parts)

        main_markup = {main_color: True}
        duration = f" in {format_session_duration(session_duration.seconds)}"
        duration_with_markup = self._tw.markup(duration, **main_markup)
        if display_sep:
            fullwidth += len(duration_with_markup) - len(duration)
        msg += duration_with_markup

        if display_sep:
            markup_for_end_sep = self._tw.markup("", **main_markup)
            if markup_for_end_sep.endswith("\x1b[0m"):
                # Drop the trailing reset so the separator keeps the color.
                markup_for_end_sep = markup_for_end_sep[:-4]
            fullwidth += len(markup_for_end_sep)
            msg += markup_for_end_sep

        if display_sep:
            self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
        else:
            self.write_line(msg, **main_markup)

    def short_test_summary(self) -> None:
        """Print the '-r'-controlled short test summary info section."""
        if not self.reportchars:
            return

        def show_simple(lines: list[str], *, stat: str) -> None:
            failed = self.stats.get(stat, [])
            if not failed:
                return
            config = self.config
            for rep in failed:
                color = _color_for_type.get(stat, _color_for_type_default)
                line = _get_line_with_reprcrash_message(
                    config, rep, self._tw, {color: True}
                )
                lines.append(line)

        def show_xfailed(lines: list[str]) -> None:
            xfailed = self.stats.get("xfailed", [])
            for rep in xfailed:
                verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
                    self.config, {_color_for_type["warnings"]: True}
                )
                markup_word = self._tw.markup(verbose_word, **verbose_markup)
                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
                line = f"{markup_word} {nodeid}"
                reason = rep.wasxfail
                if reason:
                    line += " - " + str(reason)

                lines.append(line)

        def show_xpassed(lines: list[str]) -> None:
            xpassed = self.stats.get("xpassed", [])
            for rep in xpassed:
                verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
                    self.config, {_color_for_type["warnings"]: True}
                )
                markup_word = self._tw.markup(verbose_word, **verbose_markup)
                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
                line = f"{markup_word} {nodeid}"
                reason = rep.wasxfail
                if reason:
                    line += " - " + str(reason)
                lines.append(line)

        def show_skipped_folded(lines: list[str]) -> None:
            skipped: list[CollectReport] = self.stats.get("skipped", [])
            fskips = _folded_skips(self.startpath, skipped) if skipped else []
            if not fskips:
                return
            verbose_word, verbose_markup = skipped[0]._get_verbose_word_with_markup(
                self.config, {_color_for_type["warnings"]: True}
            )
            markup_word = self._tw.markup(verbose_word, **verbose_markup)
            prefix = "Skipped: "
            for num, fspath, lineno, reason in fskips:
                if reason.startswith(prefix):
                    reason = reason[len(prefix) :]
                if lineno is not None:
                    lines.append(f"{markup_word} [{num}] {fspath}:{lineno}: {reason}")
                else:
                    lines.append(f"{markup_word} [{num}] {fspath}: {reason}")

        def show_skipped_unfolded(lines: list[str]) -> None:
            skipped: list[CollectReport] = self.stats.get("skipped", [])

            for rep in skipped:
                assert rep.longrepr is not None
                assert isinstance(rep.longrepr, tuple), (rep, rep.longrepr)
                assert len(rep.longrepr) == 3, (rep, rep.longrepr)

                verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
                    self.config, {_color_for_type["warnings"]: True}
                )
                markup_word = self._tw.markup(verbose_word, **verbose_markup)
                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
                line = f"{markup_word} {nodeid}"
                reason = rep.longrepr[2]
                if reason:
                    line += " - " + str(reason)
                lines.append(line)

        def show_skipped(lines: list[str]) -> None:
            if self.foldskipped:
                show_skipped_folded(lines)
            else:
                show_skipped_unfolded(lines)

        REPORTCHAR_ACTIONS: Mapping[str, Callable[[list[str]], None]] = {
            "x": show_xfailed,
            "X": show_xpassed,
            "f": partial(show_simple, stat="failed"),
            "s": show_skipped,
            "p": partial(show_simple, stat="passed"),
            "E": partial(show_simple, stat="error"),
        }

        lines: list[str] = []
        for char in self.reportchars:
            action = REPORTCHAR_ACTIONS.get(char)
            if action:  # skipping e.g. "P" (passed with output) here.
                action(lines)

        if lines:
            self.write_sep("=", "short test summary info", cyan=True, bold=True)
            for line in lines:
                self.write_line(line)

    def _get_main_color(self) -> tuple[str, list[str]]:
        # Recompute lazily; also on the last item so the final color is exact.
        if self._main_color is None or self._known_types is None or self._is_last_item:
            self._set_main_color()
            assert self._main_color
            assert self._known_types
        return self._main_color, self._known_types

    def _determine_main_color(self, unknown_type_seen: bool) -> str:
        stats = self.stats
        if "failed" in stats or "error" in stats:
            main_color = "red"
        elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
            main_color = "yellow"
        elif "passed" in stats or not self._is_last_item:
            main_color = "green"
        else:
            main_color = "yellow"
        return main_color

    def _set_main_color(self) -> None:
        unknown_types: list[str] = []
        for found_type in self.stats:
            if found_type:  # setup/teardown reports have an empty key, ignore them
                if found_type not in KNOWN_TYPES and found_type not in unknown_types:
                    unknown_types.append(found_type)
        self._known_types = list(KNOWN_TYPES) + unknown_types
        self._main_color = self._determine_main_color(bool(unknown_types))

    def build_summary_stats_line(self) -> tuple[list[tuple[str, dict[str, bool]]], str]:
        """
        Build the parts used in the last summary stats line.

        The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs===".

        This function builds a list of the "parts" that make up for the text in that line, in
        the example above it would be::

            [
                ("12 passed", {"green": True}),
                ("2 errors", {"red": True})
            ]

        That last dict for each line is a "markup dictionary", used by TerminalWriter to
        color output.

        The final color of the line is also determined by this function, and is the second
        element of the returned tuple.
        """
        if self.config.getoption("collectonly"):
            return self._build_collect_only_summary_stats_line()
        else:
            return self._build_normal_summary_stats_line()

    def _get_reports_to_display(self, key: str) -> list[Any]:
        """Get test/collection reports for the given status key, such as `passed` or `error`."""
        reports = self.stats.get(key, [])
        return [x for x in reports if getattr(x, "count_towards_summary", True)]

    def _build_normal_summary_stats_line(
        self,
    ) -> tuple[list[tuple[str, dict[str, bool]]], str]:
        main_color, known_types = self._get_main_color()
        parts = []

        for key in known_types:
            reports = self._get_reports_to_display(key)
            if reports:
                count = len(reports)
                color = _color_for_type.get(key, _color_for_type_default)
                markup = {color: True, "bold": color == main_color}
                parts.append(("%d %s" % pluralize(count, key), markup))  # noqa: UP031

        if not parts:
            parts = [("no tests ran", {_color_for_type_default: True})]

        return parts, main_color

    def _build_collect_only_summary_stats_line(
        self,
    ) -> tuple[list[tuple[str, dict[str, bool]]], str]:
        deselected = len(self._get_reports_to_display("deselected"))
        errors = len(self._get_reports_to_display("error"))

        if self._numcollected == 0:
            parts = [("no tests collected", {"yellow": True})]
            main_color = "yellow"

        elif deselected == 0:
            main_color = "green"
            collected_output = "%d %s collected" % pluralize(self._numcollected, "test")  # noqa: UP031
            parts = [(collected_output, {main_color: True})]
        else:
            all_tests_were_deselected = self._numcollected == deselected
            if all_tests_were_deselected:
                main_color = "yellow"
                collected_output = f"no tests collected ({deselected} deselected)"
            else:
                main_color = "green"
                selected = self._numcollected - deselected
                collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"

            parts = [(collected_output, {main_color: True})]

        if errors:
            main_color = _color_for_type["error"]
            parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})]  # noqa: UP031

        return parts, main_color
380 def __init__(self, config: Config, file: TextIO | None = None) -> None: 381 import _pytest.config 382 383 self.config = config 384 self._numcollected = 0 385 self._session: Session | None = None 386 self._showfspath: bool | None = None 387 388 self.stats: dict[str, list[Any]] = {} 389 self._main_color: str | None = None 390 self._known_types: list[str] | None = None 391 self.startpath = config.invocation_params.dir 392 if file is None: 393 file = sys.stdout 394 self._tw = _pytest.config.create_terminal_writer(config, file) 395 self._screen_width = self._tw.fullwidth 396 self.currentfspath: None | Path | str | int = None 397 self.reportchars = getreportopt(config) 398 self.foldskipped = config.option.fold_skipped 399 self.hasmarkup = self._tw.hasmarkup 400 # isatty should be a method but was wrongly implemented as a boolean. 401 # We use CallableBool here to support both. 402 self.isatty = compat.CallableBool(file.isatty()) 403 self._progress_nodeids_reported: set[str] = set() 404 self._timing_nodeids_reported: set[str] = set() 405 self._show_progress_info = self._determine_show_progress_info() 406 self._collect_report_last_write = timing.Instant() 407 self._already_displayed_warnings: int | None = None 408 self._keyboardinterrupt_memo: ExceptionRepr | None = None
466 @property 467 def reported_progress(self) -> int: 468 """The amount of items reported in the progress so far. 469 470 :meta private: 471 """ 472 return len(self._progress_nodeids_reported)
The amount of items reported in the progress so far.
:meta private:
478 def write_fspath_result(self, nodeid: str, res: str, **markup: bool) -> None: 479 fspath = self.config.rootpath / nodeid.split("::")[0] 480 if self.currentfspath is None or fspath != self.currentfspath: 481 if self.currentfspath is not None and self._show_progress_info: 482 self._write_progress_information_filling_space() 483 self.currentfspath = fspath 484 relfspath = bestrelpath(self.startpath, fspath) 485 self._tw.line() 486 self._tw.write(relfspath + " ") 487 self._tw.write(res, flush=True, **markup)
503 def wrap_write( 504 self, 505 content: str, 506 *, 507 flush: bool = False, 508 margin: int = 8, 509 line_sep: str = "\n", 510 **markup: bool, 511 ) -> None: 512 """Wrap message with margin for progress info.""" 513 width_of_current_line = self._tw.width_of_current_line 514 wrapped = line_sep.join( 515 textwrap.wrap( 516 " " * width_of_current_line + content, 517 width=self._screen_width - margin, 518 drop_whitespace=True, 519 replace_whitespace=False, 520 ), 521 ) 522 wrapped = wrapped[width_of_current_line:] 523 self._tw.write(wrapped, flush=flush, **markup)
Wrap message with margin for progress info.
540 def rewrite(self, line: str, **markup: bool) -> None: 541 """Rewinds the terminal cursor to the beginning and writes the given line. 542 543 :param erase: 544 If True, will also add spaces until the full terminal width to ensure 545 previous lines are properly erased. 546 547 The rest of the keyword arguments are markup instructions. 548 """ 549 erase = markup.pop("erase", False) 550 if erase: 551 fill_count = self._tw.fullwidth - len(line) - 1 552 fill = " " * fill_count 553 else: 554 fill = "" 555 line = str(line) 556 self._tw.write("\r" + line + fill, **markup)
Rewinds the terminal cursor to the beginning and writes the given line.
Parameters
- erase: If True, will also add spaces until the full terminal width to ensure previous lines are properly erased.
The rest of the keyword arguments are markup instructions.
585 def pytest_warning_recorded( 586 self, 587 warning_message: warnings.WarningMessage, 588 nodeid: str, 589 ) -> None: 590 from _pytest.warnings import warning_record_to_str 591 592 fslocation = warning_message.filename, warning_message.lineno 593 message = warning_record_to_str(warning_message) 594 595 warning_report = WarningReport( 596 fslocation=fslocation, message=message, nodeid=nodeid 597 ) 598 self._add_stats("warnings", [warning_report])
600 def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None: 601 if self.config.option.traceconfig: 602 msg = f"PLUGIN registered: {plugin}" 603 # XXX This event may happen during setup/teardown time 604 # which unfortunately captures our output here 605 # which garbles our output if we use self.write_line. 606 self.write_line(msg)
611 def pytest_runtest_logstart( 612 self, nodeid: str, location: tuple[str, int | None, str] 613 ) -> None: 614 fspath, lineno, domain = location 615 # Ensure that the path is printed before the 616 # 1st test of a module starts running. 617 if self.showlongtestinfo: 618 line = self._locationline(nodeid, fspath, lineno, domain) 619 self.write_ensure_prefix(line, "") 620 self.flush() 621 elif self.showfspath: 622 self.write_fspath_result(nodeid, "") 623 self.flush()
    def pytest_runtest_logreport(self, report: TestReport) -> None:
        """Record the report in the stats and write its progress output.

        In quiet/normal verbosity a single status letter is written; in
        verbose mode a full "<location> <WORD>" line is written instead.
        """
        self._tests_ran = True
        rep = report

        res = TestShortLogReport(
            *self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
        )
        category, letter, word = res.category, res.letter, res.word
        # The hook may return the word as (word, markup-dict).
        if not isinstance(word, tuple):
            markup = None
        else:
            word, markup = word
        self._add_stats(category, [rep])
        if not letter and not word:
            # Probably passed setup/teardown.
            return
        if markup is None:
            # No explicit markup from the hook: derive the color from the outcome.
            was_xfail = hasattr(report, "wasxfail")
            if rep.passed and not was_xfail:
                markup = {"green": True}
            elif rep.passed and was_xfail:
                markup = {"yellow": True}
            elif rep.failed:
                markup = {"red": True}
            elif rep.skipped:
                markup = {"yellow": True}
            else:
                markup = {}
        self._progress_nodeids_reported.add(rep.nodeid)
        if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0:
            self._tw.write(letter, **markup)
            # When running in xdist, the logreport and logfinish of multiple
            # items are interspersed, e.g. `logreport`, `logreport`,
            # `logfinish`, `logfinish`. To avoid the "past edge" calculation
            # from getting confused and overflowing (#7166), do the past edge
            # printing here and not in logfinish, except for the 100% which
            # should only be printed after all teardowns are finished.
            if self._show_progress_info and not self._is_last_item:
                self._write_progress_information_if_past_edge()
        else:
            line = self._locationline(rep.nodeid, *rep.location)
            running_xdist = hasattr(rep, "node")
            if not running_xdist:
                self.write_ensure_prefix(line, word, **markup)
                if rep.skipped or hasattr(report, "wasxfail"):
                    reason = _get_raw_skip_reason(rep)
                    if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2:
                        # Trim the reason so it fits before the trailing " [100%]".
                        available_width = (
                            (self._tw.fullwidth - self._tw.width_of_current_line)
                            - len(" [100%]")
                            - 1
                        )
                        formatted_reason = _format_trimmed(
                            " ({})", reason, available_width
                        )
                    else:
                        formatted_reason = f" ({reason})"

                    if reason and formatted_reason is not None:
                        self.wrap_write(formatted_reason)
                if self._show_progress_info:
                    self._write_progress_information_filling_space()
            else:
                # xdist worker report: prefix with the worker gateway id.
                self.ensure_newline()
                self._tw.write(f"[{rep.node.gateway.id}]")
                if self._show_progress_info:
                    self._tw.write(
                        self._get_progress_information_message() + " ", cyan=True
                    )
                else:
                    self._tw.write(" ")
                self._tw.write(word, **markup)
                self._tw.write(" " + line)
                # Sentinel: forces write_fspath_result to re-emit the path next time.
                self.currentfspath = -2
        self.flush()
706 @hookimpl(wrapper=True) 707 def pytest_runtestloop(self) -> Generator[None, object, object]: 708 result = yield 709 710 # Write the final/100% progress -- deferred until the loop is complete. 711 if ( 712 self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0 713 and self._show_progress_info 714 and self.reported_progress 715 ): 716 self._write_progress_information_filling_space() 717 718 return result
800 def pytest_collectreport(self, report: CollectReport) -> None: 801 if report.failed: 802 self._add_stats("error", [report]) 803 elif report.skipped: 804 self._add_stats("skipped", [report]) 805 items = [x for x in report.result if isinstance(x, Item)] 806 self._numcollected += len(items) 807 if self.isatty(): 808 self.report_collect()
810 def report_collect(self, final: bool = False) -> None: 811 if self.config.option.verbose < 0: 812 return 813 814 if not final: 815 # Only write the "collecting" report every `REPORT_COLLECTING_RESOLUTION`. 816 if ( 817 self._collect_report_last_write.elapsed().seconds 818 < REPORT_COLLECTING_RESOLUTION 819 ): 820 return 821 self._collect_report_last_write = timing.Instant() 822 823 errors = len(self.stats.get("error", [])) 824 skipped = len(self.stats.get("skipped", [])) 825 deselected = len(self.stats.get("deselected", [])) 826 selected = self._numcollected - deselected 827 line = "collected " if final else "collecting " 828 line += ( 829 str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") 830 ) 831 if errors: 832 line += f" / {errors} error{'s' if errors != 1 else ''}" 833 if deselected: 834 line += f" / {deselected} deselected" 835 if skipped: 836 line += f" / {skipped} skipped" 837 if self._numcollected > selected: 838 line += f" / {selected} selected" 839 if self.isatty(): 840 self.rewrite(line, bold=True, erase=True) 841 if final: 842 self.write("\n") 843 else: 844 self.write_line(line)
846 @hookimpl(trylast=True) 847 def pytest_sessionstart(self, session: Session) -> None: 848 self._session = session 849 self._session_start = timing.Instant() 850 if not self.showheader: 851 return 852 self.write_sep("=", "test session starts", bold=True) 853 verinfo = platform.python_version() 854 if not self.no_header: 855 msg = f"platform {sys.platform} -- Python {verinfo}" 856 pypy_version_info = getattr(sys, "pypy_version_info", None) 857 if pypy_version_info: 858 verinfo = ".".join(map(str, pypy_version_info[:3])) 859 msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]" 860 msg += f", pytest-{_pytest._version.version}, pluggy-{pluggy.__version__}" 861 if ( 862 self.verbosity > 0 863 or self.config.option.debug 864 or getattr(self.config.option, "pastebin", None) 865 ): 866 msg += " -- " + str(sys.executable) 867 self.write_line(msg) 868 lines = self.config.hook.pytest_report_header( 869 config=self.config, start_path=self.startpath 870 ) 871 self._write_report_lines_from_hooks(lines)
883 def pytest_report_header(self, config: Config) -> list[str]: 884 result = [f"rootdir: {config.rootpath}"] 885 886 if config.inipath: 887 warning = "" 888 if config._ignored_config_files: 889 warning = f" (WARNING: ignoring pytest config in {', '.join(config._ignored_config_files)}!)" 890 result.append( 891 "configfile: " + bestrelpath(config.rootpath, config.inipath) + warning 892 ) 893 894 if config.args_source == Config.ArgsSource.TESTPATHS: 895 testpaths: list[str] = config.getini("testpaths") 896 result.append("testpaths: {}".format(", ".join(testpaths))) 897 898 plugininfo = config.pluginmanager.list_plugin_distinfo() 899 if plugininfo: 900 result.append( 901 "plugins: {}".format(", ".join(_plugin_nameversions(plugininfo))) 902 ) 903 return result
905 def pytest_collection_finish(self, session: Session) -> None: 906 self.report_collect(True) 907 908 lines = self.config.hook.pytest_report_collectionfinish( 909 config=self.config, 910 start_path=self.startpath, 911 items=session.items, 912 ) 913 self._write_report_lines_from_hooks(lines) 914 915 if self.config.getoption("collectonly"): 916 if session.items: 917 if self.config.option.verbose > -1: 918 self._tw.line("") 919 self._printcollecteditems(session.items) 920 921 failed = self.stats.get("failed") 922 if failed: 923 self._tw.sep("!", "collection failures") 924 for rep in failed: 925 rep.toterminal(self._tw)
957 @hookimpl(wrapper=True) 958 def pytest_sessionfinish( 959 self, session: Session, exitstatus: int | ExitCode 960 ) -> Generator[None]: 961 result = yield 962 self._tw.line("") 963 summary_exit_codes = ( 964 ExitCode.OK, 965 ExitCode.TESTS_FAILED, 966 ExitCode.INTERRUPTED, 967 ExitCode.USAGE_ERROR, 968 ExitCode.NO_TESTS_COLLECTED, 969 ) 970 if exitstatus in summary_exit_codes and not self.no_summary: 971 self.config.hook.pytest_terminal_summary( 972 terminalreporter=self, exitstatus=exitstatus, config=self.config 973 ) 974 if session.shouldfail: 975 self.write_sep("!", str(session.shouldfail), red=True) 976 if exitstatus == ExitCode.INTERRUPTED: 977 self._report_keyboardinterrupt() 978 self._keyboardinterrupt_memo = None 979 elif session.shouldstop: 980 self.write_sep("!", str(session.shouldstop), red=True) 981 self.summary_stats() 982 return result
984 @hookimpl(wrapper=True) 985 def pytest_terminal_summary(self) -> Generator[None]: 986 self.summary_errors() 987 self.summary_failures() 988 self.summary_xfailures() 989 self.summary_warnings() 990 self.summary_passes() 991 self.summary_xpasses() 992 try: 993 return (yield) 994 finally: 995 self.short_test_summary() 996 # Display any extra warnings from teardown here (if any). 997 self.summary_warnings()
1066 def summary_warnings(self) -> None: 1067 if self.hasopt("w"): 1068 all_warnings: list[WarningReport] | None = self.stats.get("warnings") 1069 if not all_warnings: 1070 return 1071 1072 final = self._already_displayed_warnings is not None 1073 if final: 1074 warning_reports = all_warnings[self._already_displayed_warnings :] 1075 else: 1076 warning_reports = all_warnings 1077 self._already_displayed_warnings = len(warning_reports) 1078 if not warning_reports: 1079 return 1080 1081 reports_grouped_by_message: dict[str, list[WarningReport]] = {} 1082 for wr in warning_reports: 1083 reports_grouped_by_message.setdefault(wr.message, []).append(wr) 1084 1085 def collapsed_location_report(reports: list[WarningReport]) -> str: 1086 locations = [] 1087 for w in reports: 1088 location = w.get_location(self.config) 1089 if location: 1090 locations.append(location) 1091 1092 if len(locations) < 10: 1093 return "\n".join(map(str, locations)) 1094 1095 counts_by_filename = Counter( 1096 str(loc).split("::", 1)[0] for loc in locations 1097 ) 1098 return "\n".join( 1099 "{}: {} warning{}".format(k, v, "s" if v > 1 else "") 1100 for k, v in counts_by_filename.items() 1101 ) 1102 1103 title = "warnings summary (final)" if final else "warnings summary" 1104 self.write_sep("=", title, yellow=True, bold=False) 1105 for message, message_reports in reports_grouped_by_message.items(): 1106 maybe_location = collapsed_location_report(message_reports) 1107 if maybe_location: 1108 self._tw.line(maybe_location) 1109 lines = message.splitlines() 1110 indented = "\n".join(" " + x for x in lines) 1111 message = indented.rstrip() 1112 else: 1113 message = message.rstrip() 1114 self._tw.line(message) 1115 self._tw.line() 1116 self._tw.line( 1117 "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html" 1118 )
1126 def summary_passes_combined( 1127 self, which_reports: str, sep_title: str, needed_opt: str 1128 ) -> None: 1129 if self.config.option.tbstyle != "no": 1130 if self.hasopt(needed_opt): 1131 reports: list[TestReport] = self.getreports(which_reports) 1132 if not reports: 1133 return 1134 self.write_sep("=", sep_title) 1135 for rep in reports: 1136 if rep.sections: 1137 msg = self._getfailureheadline(rep) 1138 self.write_sep("_", msg, green=True, bold=True) 1139 self._outrep_summary(rep) 1140 self._handle_teardown_sections(rep.nodeid)
1154 def print_teardown_sections(self, rep: TestReport) -> None: 1155 showcapture = self.config.option.showcapture 1156 if showcapture == "no": 1157 return 1158 for secname, content in rep.sections: 1159 if showcapture != "all" and showcapture not in secname: 1160 continue 1161 if "teardown" in secname: 1162 self._tw.sep("-", secname) 1163 if content[-1:] == "\n": 1164 content = content[:-1] 1165 self._tw.line(content)
1176 def summary_failures_combined( 1177 self, 1178 which_reports: str, 1179 sep_title: str, 1180 *, 1181 style: str, 1182 needed_opt: str | None = None, 1183 ) -> None: 1184 if style != "no": 1185 if not needed_opt or self.hasopt(needed_opt): 1186 reports: list[BaseReport] = self.getreports(which_reports) 1187 if not reports: 1188 return 1189 self.write_sep("=", sep_title) 1190 if style == "line": 1191 for rep in reports: 1192 line = self._getcrashline(rep) 1193 self._outrep_summary(rep) 1194 self.write_line(line) 1195 else: 1196 for rep in reports: 1197 msg = self._getfailureheadline(rep) 1198 self.write_sep("_", msg, red=True, bold=True) 1199 self._outrep_summary(rep) 1200 self._handle_teardown_sections(rep.nodeid)
1202 def summary_errors(self) -> None: 1203 if self.config.option.tbstyle != "no": 1204 reports: list[BaseReport] = self.getreports("error") 1205 if not reports: 1206 return 1207 self.write_sep("=", "ERRORS") 1208 for rep in self.stats["error"]: 1209 msg = self._getfailureheadline(rep) 1210 if rep.when == "collect": 1211 msg = "ERROR collecting " + msg 1212 else: 1213 msg = f"ERROR at {rep.when} of {msg}" 1214 self.write_sep("_", msg, red=True, bold=True) 1215 self._outrep_summary(rep)
1230 def summary_stats(self) -> None: 1231 if self.verbosity < -1: 1232 return 1233 1234 session_duration = self._session_start.elapsed() 1235 (parts, main_color) = self.build_summary_stats_line() 1236 line_parts = [] 1237 1238 display_sep = self.verbosity >= 0 1239 if display_sep: 1240 fullwidth = self._tw.fullwidth 1241 for text, markup in parts: 1242 with_markup = self._tw.markup(text, **markup) 1243 if display_sep: 1244 fullwidth += len(with_markup) - len(text) 1245 line_parts.append(with_markup) 1246 msg = ", ".join(line_parts) 1247 1248 main_markup = {main_color: True} 1249 duration = f" in {format_session_duration(session_duration.seconds)}" 1250 duration_with_markup = self._tw.markup(duration, **main_markup) 1251 if display_sep: 1252 fullwidth += len(duration_with_markup) - len(duration) 1253 msg += duration_with_markup 1254 1255 if display_sep: 1256 markup_for_end_sep = self._tw.markup("", **main_markup) 1257 if markup_for_end_sep.endswith("\x1b[0m"): 1258 markup_for_end_sep = markup_for_end_sep[:-4] 1259 fullwidth += len(markup_for_end_sep) 1260 msg += markup_for_end_sep 1261 1262 if display_sep: 1263 self.write_sep("=", msg, fullwidth=fullwidth, **main_markup) 1264 else: 1265 self.write_line(msg, **main_markup)
    def short_test_summary(self) -> None:
        """Write the "short test summary info" section per the -r report chars."""
        if not self.reportchars:
            return

        def show_simple(lines: list[str], *, stat: str) -> None:
            # One line per report with the crash message appended.
            failed = self.stats.get(stat, [])
            if not failed:
                return
            config = self.config
            for rep in failed:
                color = _color_for_type.get(stat, _color_for_type_default)
                line = _get_line_with_reprcrash_message(
                    config, rep, self._tw, {color: True}
                )
                lines.append(line)

        def show_xfailed(lines: list[str]) -> None:
            xfailed = self.stats.get("xfailed", [])
            for rep in xfailed:
                verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
                    self.config, {_color_for_type["warnings"]: True}
                )
                markup_word = self._tw.markup(verbose_word, **verbose_markup)
                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
                line = f"{markup_word} {nodeid}"
                reason = rep.wasxfail
                if reason:
                    line += " - " + str(reason)

                lines.append(line)

        def show_xpassed(lines: list[str]) -> None:
            xpassed = self.stats.get("xpassed", [])
            for rep in xpassed:
                verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
                    self.config, {_color_for_type["warnings"]: True}
                )
                markup_word = self._tw.markup(verbose_word, **verbose_markup)
                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
                line = f"{markup_word} {nodeid}"
                reason = rep.wasxfail
                if reason:
                    line += " - " + str(reason)
                lines.append(line)

        def show_skipped_folded(lines: list[str]) -> None:
            # Fold identical skip reasons together (--fold-skipped).
            skipped: list[CollectReport] = self.stats.get("skipped", [])
            fskips = _folded_skips(self.startpath, skipped) if skipped else []
            if not fskips:
                return
            verbose_word, verbose_markup = skipped[0]._get_verbose_word_with_markup(
                self.config, {_color_for_type["warnings"]: True}
            )
            markup_word = self._tw.markup(verbose_word, **verbose_markup)
            prefix = "Skipped: "
            for num, fspath, lineno, reason in fskips:
                if reason.startswith(prefix):
                    reason = reason[len(prefix) :]
                if lineno is not None:
                    lines.append(f"{markup_word} [{num}] {fspath}:{lineno}: {reason}")
                else:
                    lines.append(f"{markup_word} [{num}] {fspath}: {reason}")

        def show_skipped_unfolded(lines: list[str]) -> None:
            skipped: list[CollectReport] = self.stats.get("skipped", [])

            for rep in skipped:
                assert rep.longrepr is not None
                assert isinstance(rep.longrepr, tuple), (rep, rep.longrepr)
                assert len(rep.longrepr) == 3, (rep, rep.longrepr)

                verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
                    self.config, {_color_for_type["warnings"]: True}
                )
                markup_word = self._tw.markup(verbose_word, **verbose_markup)
                nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
                line = f"{markup_word} {nodeid}"
                reason = rep.longrepr[2]
                if reason:
                    line += " - " + str(reason)
                lines.append(line)

        def show_skipped(lines: list[str]) -> None:
            if self.foldskipped:
                show_skipped_folded(lines)
            else:
                show_skipped_unfolded(lines)

        # Maps a -r report character to the function producing its lines.
        REPORTCHAR_ACTIONS: Mapping[str, Callable[[list[str]], None]] = {
            "x": show_xfailed,
            "X": show_xpassed,
            "f": partial(show_simple, stat="failed"),
            "s": show_skipped,
            "p": partial(show_simple, stat="passed"),
            "E": partial(show_simple, stat="error"),
        }

        lines: list[str] = []
        for char in self.reportchars:
            action = REPORTCHAR_ACTIONS.get(char)
            if action:  # skipping e.g. "P" (passed with output) here.
                action(lines)

        if lines:
            self.write_sep("=", "short test summary info", cyan=True, bold=True)
            for line in lines:
                self.write_line(line)
1403 def build_summary_stats_line(self) -> tuple[list[tuple[str, dict[str, bool]]], str]: 1404 """ 1405 Build the parts used in the last summary stats line. 1406 1407 The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs===". 1408 1409 This function builds a list of the "parts" that make up for the text in that line, in 1410 the example above it would be:: 1411 1412 [ 1413 ("12 passed", {"green": True}), 1414 ("2 errors", {"red": True} 1415 ] 1416 1417 That last dict for each line is a "markup dictionary", used by TerminalWriter to 1418 color output. 1419 1420 The final color of the line is also determined by this function, and is the second 1421 element of the returned tuple. 1422 """ 1423 if self.config.getoption("collectonly"): 1424 return self._build_collect_only_summary_stats_line() 1425 else: 1426 return self._build_normal_summary_stats_line()
Build the parts used in the last summary stats line.
The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs===".
This function builds a list of the "parts" that make up for the text in that line, in the example above it would be::
[
("12 passed", {"green": True}),
("2 errors", {"red": True}
]
That last dict for each line is a "markup dictionary", used by TerminalWriter to color output.
The final color of the line is also determined by this function, and is the second element of the returned tuple.
class TestReport(BaseReport):
    """Basic test report object (also used for setup and teardown calls if
    they fail).

    Reports can contain arbitrary extra attributes.
    """

    __test__ = False

    # Defined by skipping plugin.
    # xfail reason if xfailed, otherwise not defined. Use hasattr to distinguish.
    wasxfail: str

    def __init__(
        self,
        nodeid: str,
        location: tuple[str, int | None, str],
        keywords: Mapping[str, Any],
        outcome: Literal["passed", "failed", "skipped"],
        longrepr: None
        | ExceptionInfo[BaseException]
        | tuple[str, int, str]
        | str
        | TerminalRepr,
        when: Literal["setup", "call", "teardown"],
        sections: Iterable[tuple[str, str]] = (),
        duration: float = 0,
        start: float = 0,
        stop: float = 0,
        user_properties: Iterable[tuple[str, object]] | None = None,
        **extra,
    ) -> None:
        #: Normalized collection nodeid.
        self.nodeid = nodeid

        #: A (filesystempath, lineno, domaininfo) tuple indicating the
        #: actual location of a test item - it might be different from the
        #: collected one e.g. if a method is inherited from a different module.
        #: The filesystempath may be relative to ``config.rootdir``.
        #: The line number is 0-based.
        self.location: tuple[str, int | None, str] = location

        #: A name -> value dictionary containing all keywords and
        #: markers associated with a test invocation.
        self.keywords: Mapping[str, Any] = keywords

        #: Test outcome, always one of "passed", "failed", "skipped".
        self.outcome = outcome

        #: None or a failure representation.
        self.longrepr = longrepr

        #: One of 'setup', 'call', 'teardown' to indicate runtest phase.
        self.when: Literal["setup", "call", "teardown"] = when

        #: User properties is a list of tuples (name, value) that holds user
        #: defined properties of the test.
        self.user_properties = list(user_properties or [])

        #: Tuples of str ``(heading, content)`` with extra information
        #: for the test report. Used by pytest to add text captured
        #: from ``stdout``, ``stderr``, and intercepted logging events. May
        #: be used by other plugins to add arbitrary information to reports.
        self.sections = list(sections)

        #: Time it took to run just the test.
        self.duration: float = duration

        #: The system time when the call started, in seconds since the epoch.
        self.start: float = start
        #: The system time when the call ended, in seconds since the epoch.
        self.stop: float = stop

        # Any extra keyword arguments become plain attributes of the report.
        self.__dict__.update(extra)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.nodeid!r} when={self.when!r} outcome={self.outcome!r}>"

    @classmethod
    def from_item_and_call(cls, item: Item, call: CallInfo[None]) -> TestReport:
        """Create and fill a TestReport with standard item and call info.

        :param item: The item.
        :param call: The call info.
        """
        when = call.when
        # Remove "collect" from the Literal type -- only for collection calls.
        assert when != "collect"
        duration = call.duration
        start = call.start
        stop = call.stop
        keywords = {x: 1 for x in item.keywords}
        excinfo = call.excinfo
        sections = []
        if not call.excinfo:
            outcome: Literal["passed", "failed", "skipped"] = "passed"
            longrepr: (
                None
                | ExceptionInfo[BaseException]
                | tuple[str, int, str]
                | str
                | TerminalRepr
            ) = None
        else:
            if not isinstance(excinfo, ExceptionInfo):
                # Not an ExceptionInfo: use the value verbatim as the failure repr.
                outcome = "failed"
                longrepr = excinfo
            elif isinstance(excinfo.value, skip.Exception):
                outcome = "skipped"
                r = excinfo._getreprcrash()
                assert r is not None, (
                    "There should always be a traceback entry for skipping a test."
                )
                if excinfo.value._use_item_location:
                    path, line = item.reportinfo()[:2]
                    assert line is not None
                    # reportinfo() lines are 0-based; longrepr wants 1-based.
                    longrepr = (os.fspath(path), line + 1, r.message)
                else:
                    longrepr = (str(r.path), r.lineno, r.message)
            elif isinstance(excinfo.value, BaseExceptionGroup) and (
                excinfo.value.split(skip.Exception)[1] is None
            ):
                # All exceptions in the group are skip exceptions.
                outcome = "skipped"
                excinfo = cast(
                    ExceptionInfo[
                        BaseExceptionGroup[BaseException | BaseExceptionGroup]
                    ],
                    excinfo,
                )
                longrepr = _format_exception_group_all_skipped_longrepr(item, excinfo)
            else:
                outcome = "failed"
                longrepr = _format_failed_longrepr(item, call, excinfo)
        for rwhen, key, content in item._report_sections:
            sections.append((f"Captured {key} {rwhen}", content))
        return cls(
            item.nodeid,
            item.location,
            keywords,
            outcome,
            longrepr,
            when,
            sections,
            duration,
            start,
            stop,
            user_properties=item.user_properties,
        )
Basic test report object (also used for setup and teardown calls if they fail).
Reports can contain arbitrary extra attributes.
    def __init__(
        self,
        nodeid: str,
        location: tuple[str, int | None, str],
        keywords: Mapping[str, Any],
        outcome: Literal["passed", "failed", "skipped"],
        longrepr: None
        | ExceptionInfo[BaseException]
        | tuple[str, int, str]
        | str
        | TerminalRepr,
        when: Literal["setup", "call", "teardown"],
        sections: Iterable[tuple[str, str]] = (),
        duration: float = 0,
        start: float = 0,
        stop: float = 0,
        user_properties: Iterable[tuple[str, object]] | None = None,
        **extra,
    ) -> None:
        #: Normalized collection nodeid.
        self.nodeid = nodeid

        #: A (filesystempath, lineno, domaininfo) tuple indicating the
        #: actual location of a test item - it might be different from the
        #: collected one e.g. if a method is inherited from a different module.
        #: The filesystempath may be relative to ``config.rootdir``.
        #: The line number is 0-based.
        self.location: tuple[str, int | None, str] = location

        #: A name -> value dictionary containing all keywords and
        #: markers associated with a test invocation.
        self.keywords: Mapping[str, Any] = keywords

        #: Test outcome, always one of "passed", "failed", "skipped".
        self.outcome = outcome

        #: None or a failure representation.
        self.longrepr = longrepr

        #: One of 'setup', 'call', 'teardown' to indicate runtest phase.
        self.when: Literal["setup", "call", "teardown"] = when

        #: User properties is a list of tuples (name, value) that holds user
        #: defined properties of the test.
        self.user_properties = list(user_properties or [])

        #: Tuples of str ``(heading, content)`` with extra information
        #: for the test report. Used by pytest to add text captured
        #: from ``stdout``, ``stderr``, and intercepted logging events. May
        #: be used by other plugins to add arbitrary information to reports.
        self.sections = list(sections)

        #: Time it took to run just the test.
        self.duration: float = duration

        #: The system time when the call started, in seconds since the epoch.
        self.start: float = start
        #: The system time when the call ended, in seconds since the epoch.
        self.stop: float = stop

        # Any extra keyword arguments become plain attributes of the report.
        self.__dict__.update(extra)
    @classmethod
    def from_item_and_call(cls, item: Item, call: CallInfo[None]) -> TestReport:
        """Create and fill a TestReport with standard item and call info.

        :param item: The item.
        :param call: The call info.
        """
        when = call.when
        # Remove "collect" from the Literal type -- only for collection calls.
        assert when != "collect"
        duration = call.duration
        start = call.start
        stop = call.stop
        keywords = {x: 1 for x in item.keywords}
        excinfo = call.excinfo
        sections = []
        if not call.excinfo:
            outcome: Literal["passed", "failed", "skipped"] = "passed"
            longrepr: (
                None
                | ExceptionInfo[BaseException]
                | tuple[str, int, str]
                | str
                | TerminalRepr
            ) = None
        else:
            if not isinstance(excinfo, ExceptionInfo):
                # Not an ExceptionInfo: use the value verbatim as the failure repr.
                outcome = "failed"
                longrepr = excinfo
            elif isinstance(excinfo.value, skip.Exception):
                outcome = "skipped"
                r = excinfo._getreprcrash()
                assert r is not None, (
                    "There should always be a traceback entry for skipping a test."
                )
                if excinfo.value._use_item_location:
                    path, line = item.reportinfo()[:2]
                    assert line is not None
                    # reportinfo() lines are 0-based; longrepr wants 1-based.
                    longrepr = (os.fspath(path), line + 1, r.message)
                else:
                    longrepr = (str(r.path), r.lineno, r.message)
            elif isinstance(excinfo.value, BaseExceptionGroup) and (
                excinfo.value.split(skip.Exception)[1] is None
            ):
                # All exceptions in the group are skip exceptions.
                outcome = "skipped"
                excinfo = cast(
                    ExceptionInfo[
                        BaseExceptionGroup[BaseException | BaseExceptionGroup]
                    ],
                    excinfo,
                )
                longrepr = _format_exception_group_all_skipped_longrepr(item, excinfo)
            else:
                outcome = "failed"
                longrepr = _format_failed_longrepr(item, call, excinfo)
        for rwhen, key, content in item._report_sections:
            sections.append((f"Captured {key} {rwhen}", content))
        return cls(
            item.nodeid,
            item.location,
            keywords,
            outcome,
            longrepr,
            when,
            sections,
            duration,
            start,
            stop,
            user_properties=item.user_properties,
        )
Create and fill a TestReport with standard item and call info.
Parameters
- item: The item.
- call: The call info.
118class TestShortLogReport(NamedTuple): 119 """Used to store the test status result category, shortletter and verbose word. 120 For example ``"rerun", "R", ("RERUN", {"yellow": True})``. 121 122 :ivar category: 123 The class of result, for example ``“passed”``, ``“skipped”``, ``“error”``, or the empty string. 124 125 :ivar letter: 126 The short letter shown as testing progresses, for example ``"."``, ``"s"``, ``"E"``, or the empty string. 127 128 :ivar word: 129 Verbose word is shown as testing progresses in verbose mode, for example ``"PASSED"``, ``"SKIPPED"``, 130 ``"ERROR"``, or the empty string. 131 """ 132 133 category: str 134 letter: str 135 word: str | tuple[str, Mapping[str, bool]]
Used to store the test status result category, shortletter and verbose word.
For example "rerun", "R", ("RERUN", {"yellow": True}).
:ivar category:
The class of result, for example "passed", "skipped", "error", or the empty string.
:ivar letter:
The short letter shown as testing progresses, for example ".", "s", "E", or the empty string.
:ivar word:
Verbose word is shown as testing progresses in verbose mode, for example "PASSED", "SKIPPED",
"ERROR", or the empty string.
@final
class Testdir:
    """
    Similar to :class:`Pytester`, but this class works with legacy legacy_path objects instead.

    All methods just forward to an internal :class:`Pytester` instance, converting results
    to `legacy_path` objects as necessary.
    """

    # Prevent pytest from collecting this class as a test class.
    __test__ = False

    # Constants re-exported from the wrapped Pytester.
    CLOSE_STDIN: Final = Pytester.CLOSE_STDIN
    TimeoutExpired: Final = Pytester.TimeoutExpired

    def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None:
        """Wrap *pytester*; construction is private to pytest (enforced by
        ``check_ispytest``)."""
        check_ispytest(_ispytest)
        self._pytester = pytester

    @property
    def tmpdir(self) -> LEGACY_PATH:
        """Temporary directory where tests are executed."""
        return legacy_path(self._pytester.path)

    @property
    def test_tmproot(self) -> LEGACY_PATH:
        """The wrapped pytester's ``_test_tmproot`` as a legacy path."""
        return legacy_path(self._pytester._test_tmproot)

    @property
    def request(self):
        """The fixture request the wrapped :class:`Pytester` holds (forwarded)."""
        return self._pytester._request

    @property
    def plugins(self):
        """Plugins of the wrapped :class:`Pytester` (forwarded)."""
        return self._pytester.plugins

    @plugins.setter
    def plugins(self, plugins):
        self._pytester.plugins = plugins

    @property
    def monkeypatch(self) -> MonkeyPatch:
        """The :class:`MonkeyPatch` of the wrapped :class:`Pytester` (forwarded)."""
        return self._pytester._monkeypatch

    def make_hook_recorder(self, pluginmanager) -> HookRecorder:
        """See :meth:`Pytester.make_hook_recorder`."""
        return self._pytester.make_hook_recorder(pluginmanager)

    def chdir(self) -> None:
        """See :meth:`Pytester.chdir`."""
        return self._pytester.chdir()

    def finalize(self) -> None:
        """Forward to the wrapped pytester's ``_finalize``."""
        return self._pytester._finalize()

    def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH:
        """See :meth:`Pytester.makefile`."""
        if ext and not ext.startswith("."):
            # pytester.makefile is going to throw a ValueError in a way that
            # testdir.makefile did not, because
            # pathlib.Path is stricter about suffixes than py.path.
            # This ext argument is likely user error, but since testdir has
            # allowed this, we will prepend "." as a workaround to avoid breaking
            # testdir usage that worked before.
            ext = "." + ext
        return legacy_path(self._pytester.makefile(ext, *args, **kwargs))

    def makeconftest(self, source) -> LEGACY_PATH:
        """See :meth:`Pytester.makeconftest`."""
        return legacy_path(self._pytester.makeconftest(source))

    def makeini(self, source) -> LEGACY_PATH:
        """See :meth:`Pytester.makeini`."""
        return legacy_path(self._pytester.makeini(source))

    def getinicfg(self, source: str) -> SectionWrapper:
        """See :meth:`Pytester.getinicfg`."""
        return self._pytester.getinicfg(source)

    def makepyprojecttoml(self, source) -> LEGACY_PATH:
        """See :meth:`Pytester.makepyprojecttoml`."""
        return legacy_path(self._pytester.makepyprojecttoml(source))

    def makepyfile(self, *args, **kwargs) -> LEGACY_PATH:
        """See :meth:`Pytester.makepyfile`."""
        return legacy_path(self._pytester.makepyfile(*args, **kwargs))

    def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH:
        """See :meth:`Pytester.maketxtfile`."""
        return legacy_path(self._pytester.maketxtfile(*args, **kwargs))

    def syspathinsert(self, path=None) -> None:
        """See :meth:`Pytester.syspathinsert`."""
        return self._pytester.syspathinsert(path)

    def mkdir(self, name) -> LEGACY_PATH:
        """See :meth:`Pytester.mkdir`."""
        return legacy_path(self._pytester.mkdir(name))

    def mkpydir(self, name) -> LEGACY_PATH:
        """See :meth:`Pytester.mkpydir`."""
        return legacy_path(self._pytester.mkpydir(name))

    def copy_example(self, name=None) -> LEGACY_PATH:
        """See :meth:`Pytester.copy_example`."""
        return legacy_path(self._pytester.copy_example(name))

    def getnode(self, config: Config, arg) -> Item | Collector | None:
        """See :meth:`Pytester.getnode`."""
        return self._pytester.getnode(config, arg)

    def getpathnode(self, path):
        """See :meth:`Pytester.getpathnode`."""
        return self._pytester.getpathnode(path)

    def genitems(self, colitems: list[Item | Collector]) -> list[Item]:
        """See :meth:`Pytester.genitems`."""
        return self._pytester.genitems(colitems)

    def runitem(self, source):
        """See :meth:`Pytester.runitem`."""
        return self._pytester.runitem(source)

    def inline_runsource(self, source, *cmdlineargs):
        """See :meth:`Pytester.inline_runsource`."""
        return self._pytester.inline_runsource(source, *cmdlineargs)

    def inline_genitems(self, *args):
        """See :meth:`Pytester.inline_genitems`."""
        return self._pytester.inline_genitems(*args)

    def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False):
        """See :meth:`Pytester.inline_run`."""
        return self._pytester.inline_run(
            *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc
        )

    def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
        """See :meth:`Pytester.runpytest_inprocess`."""
        return self._pytester.runpytest_inprocess(*args, **kwargs)

    def runpytest(self, *args, **kwargs) -> RunResult:
        """See :meth:`Pytester.runpytest`."""
        return self._pytester.runpytest(*args, **kwargs)

    def parseconfig(self, *args) -> Config:
        """See :meth:`Pytester.parseconfig`."""
        return self._pytester.parseconfig(*args)

    def parseconfigure(self, *args) -> Config:
        """See :meth:`Pytester.parseconfigure`."""
        return self._pytester.parseconfigure(*args)

    def getitem(self, source, funcname="test_func"):
        """See :meth:`Pytester.getitem`."""
        return self._pytester.getitem(source, funcname)

    def getitems(self, source):
        """See :meth:`Pytester.getitems`."""
        return self._pytester.getitems(source)

    def getmodulecol(self, source, configargs=(), withinit=False):
        """See :meth:`Pytester.getmodulecol`."""
        return self._pytester.getmodulecol(
            source, configargs=configargs, withinit=withinit
        )

    def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None:
        """See :meth:`Pytester.collect_by_name`."""
        return self._pytester.collect_by_name(modcol, name)

    def popen(
        self,
        cmdargs,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=CLOSE_STDIN,
        **kw,
    ):
        """See :meth:`Pytester.popen`."""
        return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw)

    def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult:
        """See :meth:`Pytester.run`."""
        return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin)

    def runpython(self, script) -> RunResult:
        """See :meth:`Pytester.runpython`."""
        return self._pytester.runpython(script)

    def runpython_c(self, command):
        """See :meth:`Pytester.runpython_c`."""
        return self._pytester.runpython_c(command)

    def runpytest_subprocess(self, *args, timeout=None) -> RunResult:
        """See :meth:`Pytester.runpytest_subprocess`."""
        return self._pytester.runpytest_subprocess(*args, timeout=timeout)

    def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn:
        """See :meth:`Pytester.spawn_pytest`."""
        return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout)

    def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn:
        """See :meth:`Pytester.spawn`."""
        return self._pytester.spawn(cmd, expect_timeout=expect_timeout)

    def __repr__(self) -> str:
        """Debug representation showing the tmpdir."""
        return f"<Testdir {self.tmpdir!r}>"

    def __str__(self) -> str:
        """The tmpdir path as a string."""
        return str(self.tmpdir)
Similar to Pytester, but this class works with legacy legacy_path objects instead.
All methods just forward to an internal Pytester instance, converting results
to legacy_path objects as necessary.
60 @property 61 def tmpdir(self) -> LEGACY_PATH: 62 """Temporary directory where tests are executed.""" 63 return legacy_path(self._pytester.path)
Temporary directory where tests are executed.
96 def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH: 97 """See :meth:`Pytester.makefile`.""" 98 if ext and not ext.startswith("."): 99 # pytester.makefile is going to throw a ValueError in a way that 100 # testdir.makefile did not, because 101 # pathlib.Path is stricter suffixes than py.path 102 # This ext arguments is likely user error, but since testdir has 103 # allowed this, we will prepend "." as a workaround to avoid breaking 104 # testdir usage that worked before 105 ext = "." + ext 106 return legacy_path(self._pytester.makefile(ext, *args, **kwargs))
See Pytester.makefile().
112 def makeini(self, source) -> LEGACY_PATH: 113 """See :meth:`Pytester.makeini`.""" 114 return legacy_path(self._pytester.makeini(source))
See Pytester.makeini().
116 def getinicfg(self, source: str) -> SectionWrapper: 117 """See :meth:`Pytester.getinicfg`.""" 118 return self._pytester.getinicfg(source)
See Pytester.getinicfg().
136 def mkdir(self, name) -> LEGACY_PATH: 137 """See :meth:`Pytester.mkdir`.""" 138 return legacy_path(self._pytester.mkdir(name))
See Pytester.mkdir().
140 def mkpydir(self, name) -> LEGACY_PATH: 141 """See :meth:`Pytester.mkpydir`.""" 142 return legacy_path(self._pytester.mkpydir(name))
See Pytester.mkpydir().
148 def getnode(self, config: Config, arg) -> Item | Collector | None: 149 """See :meth:`Pytester.getnode`.""" 150 return self._pytester.getnode(config, arg)
See Pytester.getnode().
156 def genitems(self, colitems: list[Item | Collector]) -> list[Item]: 157 """See :meth:`Pytester.genitems`.""" 158 return self._pytester.genitems(colitems)
See Pytester.genitems().
160 def runitem(self, source): 161 """See :meth:`Pytester.runitem`.""" 162 return self._pytester.runitem(source)
See Pytester.runitem().
182 def runpytest(self, *args, **kwargs) -> RunResult: 183 """See :meth:`Pytester.runpytest`.""" 184 return self._pytester.runpytest(*args, **kwargs)
See Pytester.runpytest().
194 def getitem(self, source, funcname="test_func"): 195 """See :meth:`Pytester.getitem`.""" 196 return self._pytester.getitem(source, funcname)
See Pytester.getitem().
198 def getitems(self, source): 199 """See :meth:`Pytester.getitems`.""" 200 return self._pytester.getitems(source)
See Pytester.getitems().
212 def popen( 213 self, 214 cmdargs, 215 stdout=subprocess.PIPE, 216 stderr=subprocess.PIPE, 217 stdin=CLOSE_STDIN, 218 **kw, 219 ): 220 """See :meth:`Pytester.popen`.""" 221 return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw)
See Pytester.popen().
223 def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult: 224 """See :meth:`Pytester.run`.""" 225 return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin)
See Pytester.run().
227 def runpython(self, script) -> RunResult: 228 """See :meth:`Pytester.runpython`.""" 229 return self._pytester.runpython(script)
See Pytester.runpython().
243 def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn: 244 """See :meth:`Pytester.spawn`.""" 245 return self._pytester.spawn(cmd, expect_timeout=expect_timeout)
See Pytester.spawn().
Common base class for all non-exit exceptions.
7from _pytest import __version__
Error in pytest usage or invocation.
class WarningsRecorder(warnings.catch_warnings):
    """A context manager to record raised warnings.

    Each recorded warning is an instance of :class:`warnings.WarningMessage`.

    Adapted from `warnings.catch_warnings`.

    .. note::
        ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
        differently; see :ref:`ensuring_function_triggers`.

    """

    def __init__(self, *, _ispytest: bool = False) -> None:
        """Private to pytest (enforced by ``check_ispytest``)."""
        check_ispytest(_ispytest)
        # record=True makes the base class collect warnings into a list
        # returned from __enter__ instead of letting them print.
        super().__init__(record=True)
        self._entered = False
        self._list: list[warnings.WarningMessage] = []

    @property
    def list(self) -> list[warnings.WarningMessage]:
        """The list of recorded warnings."""
        return self._list

    def __getitem__(self, i: int) -> warnings.WarningMessage:
        """Get a recorded warning by index."""
        return self._list[i]

    def __iter__(self) -> Iterator[warnings.WarningMessage]:
        """Iterate through the recorded warnings."""
        return iter(self._list)

    def __len__(self) -> int:
        """The number of recorded warnings."""
        return len(self._list)

    def pop(self, cls: type[Warning] = Warning) -> warnings.WarningMessage:
        """Pop the first recorded warning which is an instance of ``cls``,
        but not an instance of a child class of any other match.
        Raises ``AssertionError`` if there is no match.
        """
        best_idx: int | None = None
        for i, w in enumerate(self._list):
            if w.category == cls:
                return self._list.pop(i)  # exact match, stop looking
            if issubclass(w.category, cls) and (
                best_idx is None
                or not issubclass(w.category, self._list[best_idx].category)
            ):
                # Keep the least-derived subclass match seen so far.
                best_idx = i
        if best_idx is not None:
            return self._list.pop(best_idx)
        # Hide this frame from pytest failure tracebacks.
        __tracebackhide__ = True
        raise AssertionError(f"{cls!r} not found in warning list")

    def clear(self) -> None:
        """Clear the list of recorded warnings."""
        self._list[:] = []

    # Type ignored because we basically want the `catch_warnings` generic type
    # parameter to be ourselves but that is not possible(?).
    def __enter__(self) -> Self:  # type: ignore[override]
        if self._entered:
            __tracebackhide__ = True
            raise RuntimeError(f"Cannot enter {self!r} twice")
        # The base __enter__ manages the entered state for us (see the
        # matching comment in __exit__) and, with record=True, returns the
        # list that will receive the recorded warnings.
        _list = super().__enter__()
        assert _list is not None
        self._list = _list
        # Record every warning, including duplicates that would normally be
        # suppressed by the default filters.
        warnings.simplefilter("always")
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        if not self._entered:
            __tracebackhide__ = True
            raise RuntimeError(f"Cannot exit {self!r} without entering first")

        super().__exit__(exc_type, exc_val, exc_tb)

        # Built-in catch_warnings does not reset entered state so we do it
        # manually here for this context manager to become reusable.
        self._entered = False
A context manager to record raised warnings.
Each recorded warning is an instance of warnings.WarningMessage.
Adapted from warnings.catch_warnings.
DeprecationWarning and PendingDeprecationWarning are treated
differently; see :ref:ensuring_function_triggers.
184 def __init__(self, *, _ispytest: bool = False) -> None: 185 check_ispytest(_ispytest) 186 super().__init__(record=True) 187 self._entered = False 188 self._list: list[warnings.WarningMessage] = []
Specify whether to record warnings and if an alternative module should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be keyword-only.
190 @property 191 def list(self) -> list[warnings.WarningMessage]: 192 """The list of recorded warnings.""" 193 return self._list
The list of recorded warnings.
207 def pop(self, cls: type[Warning] = Warning) -> warnings.WarningMessage: 208 """Pop the first recorded warning which is an instance of ``cls``, 209 but not an instance of a child class of any other match. 210 Raises ``AssertionError`` if there is no match. 211 """ 212 best_idx: int | None = None 213 for i, w in enumerate(self._list): 214 if w.category == cls: 215 return self._list.pop(i) # exact match, stop looking 216 if issubclass(w.category, cls) and ( 217 best_idx is None 218 or not issubclass(w.category, self._list[best_idx].category) 219 ): 220 best_idx = i 221 if best_idx is not None: 222 return self._list.pop(best_idx) 223 __tracebackhide__ = True 224 raise AssertionError(f"{cls!r} not found in warning list")
Pop the first recorded warning which is an instance of cls,
but not an instance of a child class of any other match.
Raises AssertionError if there is no match.
def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
    """Assert that two numbers (or two ordered sequences of numbers) are equal to each other
    within some tolerance.

    Floating-point arithmetic makes intuitively-equal numbers compare
    unequal::

        >>> 0.1 + 0.2 == 0.3
        False

    ``approx`` makes such comparisons readable and correct::

        >>> from pytest import approx
        >>> 0.1 + 0.2 == approx(0.3)
        True

    The same syntax works for ordered sequences of numbers, numpy arrays
    (including array-vs-scalar), and dictionary *values*::

        >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
        True
        >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
        True

    Only ordered sequences are supported; ``set`` and other unordered
    collections raise ``TypeError`` because elements cannot be paired
    without ambiguity.

    **Tolerances**

    By default a value matches if it is within a relative tolerance of
    ``1e-6`` or an absolute tolerance of ``1e-12`` of the expected value.
    Both can be changed via the ``rel`` and ``abs`` arguments::

        >>> 1.0001 == approx(1)
        False
        >>> 1.0001 == approx(1, rel=1e-3)
        True
        >>> 1.0001 == approx(1, abs=1e-3)
        True

    If you specify ``abs`` but not ``rel``, only the absolute tolerance is
    considered. If you specify both, meeting either tolerance suffices::

        >>> 1 + 1e-8 == approx(1)
        True
        >>> 1 + 1e-8 == approx(1, abs=1e-12)
        False
        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
        True

    Infinity is only considered equal to itself, regardless of the relative
    tolerance. NaN is not considered equal to anything by default; pass
    ``nan_ok=True`` to make NaN equal to itself (useful for arrays that use
    NaN to mean "no data").

    **Non-numeric types**

    Non-numeric values — including non-numeric elements of dicts and
    sequences — fall back to strict equality, which is handy for optional
    values::

        >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None})
        True
        >>> [None, 1.0000005] == approx([None,1])
        True

    .. note::

        ``approx`` can handle numpy arrays, but the specialised helpers in
        :std:doc:`numpy:reference/routines.testing` are recommended if you
        need comparisons, NaNs, or ULP-based tolerances.

    .. note::

        Unlike built-in equality, this function considers booleans unequal
        to numeric zero or one::

            >>> 1 == approx(True)
            False

    .. versionchanged:: 3.2
        ``>``, ``>=``, ``<`` and ``<=`` comparisons raise :py:exc:`TypeError`
        to avoid the inconsistent results produced by Python's fixed rich
        comparison call order (see :py:meth:`object.__ge__`).

    .. versionchanged:: 3.7.1
        ``approx`` raises ``TypeError`` when it encounters a dict value or
        sequence element of non-numeric type.

    .. versionchanged:: 6.1.0
        ``approx`` falls back to strict equality for non-numeric types instead
        of raising ``TypeError``.
    """
    # Hide this helper from pytest failure tracebacks.
    __tracebackhide__ = True

    # Dispatch on the expected value's type. Each Approx* class implements
    # ``__eq__`` (the tolerant comparison) and ``__repr__`` (the value and
    # tolerance shown when a test fails); ApproxScalar holds the actual
    # numeric comparison logic that the container classes delegate to.
    if isinstance(expected, Decimal):
        return ApproxDecimal(expected, rel, abs, nan_ok)
    if isinstance(expected, Mapping):
        return ApproxMapping(expected, rel, abs, nan_ok)
    if _is_numpy_array(expected):
        # Normalize array-likes to a real numpy array before wrapping.
        return ApproxNumpy(_as_numpy_array(expected), rel, abs, nan_ok)
    if _is_sequence_like(expected):
        return ApproxSequenceLike(expected, rel, abs, nan_ok)
    if isinstance(expected, Collection) and not isinstance(expected, str | bytes):
        # Unordered collections cannot be compared element-wise.
        msg = f"pytest.approx() only supports ordered sequences, but got: {expected!r}"
        raise TypeError(msg)
    return ApproxScalar(expected, rel, abs, nan_ok)
Assert that two numbers (or two ordered sequences of numbers) are equal to each other within some tolerance.
Due to the :doc:python:tutorial/floatingpoint, numbers that we
would intuitively expect to be equal are not always so::
>>> 0.1 + 0.2 == 0.3
False
This problem is commonly encountered when writing tests, e.g. when making sure that floating-point values are what you expect them to be. One way to deal with this problem is to assert that two floating-point numbers are equal to within some appropriate tolerance::
>>> abs((0.1 + 0.2) - 0.3) < 1e-6
True
However, comparisons like this are tedious to write and difficult to
understand. Furthermore, absolute comparisons like the one above are
usually discouraged because there's no tolerance that works well for all
situations. 1e-6 is good for numbers around 1, but too small for
very big numbers and too big for very small ones. It's better to express
the tolerance as a fraction of the expected value, but relative comparisons
like that are even more difficult to write correctly and concisely.
The approx class performs floating-point comparisons using a syntax
that's as intuitive as possible::
>>> from pytest import approx
>>> 0.1 + 0.2 == approx(0.3)
True
The same syntax also works for ordered sequences of numbers::
>>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
True
numpy arrays::
>>> import numpy as np # doctest: +SKIP
>>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP
True
And for a numpy array against a scalar::
>>> import numpy as np # doctest: +SKIP
>>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP
True
Only ordered sequences are supported, because approx needs
to infer the relative position of the sequences without ambiguity. This means
sets and other unordered sequences are not supported.
Finally, dictionary values can also be compared::
>>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
True
The comparison will be true if both mappings have the same keys and their respective values match the expected tolerances.
Tolerances
By default, approx considers numbers within a relative tolerance of
1e-6 (i.e. one part in a million) of its expected value to be equal.
This treatment would lead to surprising results if the expected value was
0.0, because nothing but 0.0 itself is relatively close to 0.0.
To handle this case less surprisingly, approx also considers numbers
within an absolute tolerance of 1e-12 of its expected value to be
equal. Infinity and NaN are special cases. Infinity is only considered
equal to itself, regardless of the relative tolerance. NaN is not
considered equal to anything by default, but you can make it be equal to
itself by setting the nan_ok argument to True. (This is meant to
facilitate comparing arrays that use NaN to mean "no data".)
Both the relative and absolute tolerances can be changed by passing
arguments to the approx constructor::
>>> 1.0001 == approx(1)
False
>>> 1.0001 == approx(1, rel=1e-3)
True
>>> 1.0001 == approx(1, abs=1e-3)
True
If you specify abs but not rel, the comparison will not consider
the relative tolerance at all. In other words, two numbers that are within
the default relative tolerance of 1e-6 will still be considered unequal
if they exceed the specified absolute tolerance. If you specify both
abs and rel, the numbers will be considered equal if either
tolerance is met::
>>> 1 + 1e-8 == approx(1)
True
>>> 1 + 1e-8 == approx(1, abs=1e-12)
False
>>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
True
Non-numeric types
You can also use approx to compare non-numeric types, or dicts and
sequences containing non-numeric types, in which case it falls back to
strict equality. This can be useful for comparing dicts and sequences that
can contain optional values::
>>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None})
True
>>> [None, 1.0000005] == approx([None,1])
True
>>> ["foo", 1.0000005] == approx([None,1])
False
If you're thinking about using approx, then you might want to know how
it compares to other good ways of comparing floating-point numbers. All of
these algorithms are based on relative and absolute tolerances and should
agree for the most part, but they do have meaningful differences:
- math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0): True if the relative tolerance is met w.r.t. either a or b or if the absolute tolerance is met. Because the relative tolerance is calculated w.r.t. both a and b, this test is symmetric (i.e. neither a nor b is a "reference value"). You have to specify an absolute tolerance if you want to compare to 0.0 because there is no tolerance by default. More information: math.isclose().
- numpy.isclose(a, b, rtol=1e-5, atol=1e-8): True if the difference between a and b is less than the sum of the relative tolerance w.r.t. b and the absolute tolerance. Because the relative tolerance is only calculated w.r.t. b, this test is asymmetric and you can think of b as the reference value. Support for comparing sequences is provided by numpy.allclose(). More information: numpy.isclose documentation.
- unittest.TestCase.assertAlmostEqual(a, b): True if a and b are within an absolute tolerance of 1e-7. No relative tolerance is considered, so this function is not appropriate for very large or very small numbers. Also, it's only available in subclasses of unittest.TestCase and it's ugly because it doesn't follow PEP8. More information: unittest.TestCase.assertAlmostEqual().
- a == pytest.approx(b, rel=1e-6, abs=1e-12): True if the relative tolerance is met w.r.t. b or if the absolute tolerance is met. Because the relative tolerance is only calculated w.r.t. b, this test is asymmetric and you can think of b as the reference value. In the special case that you explicitly specify an absolute tolerance but not a relative tolerance, only the absolute tolerance is considered.
approx can handle numpy arrays, but we recommend the
specialised test helpers in :std:doc:numpy:reference/routines.testing
if you need support for comparisons, NaNs, or ULP-based tolerances.
To match strings using regex, you can use ``Matches`` from the ``re_assert`` package.
Unlike built-in equality, this function considers booleans unequal to numeric zero or one. For example::
>>> 1 == approx(True)
False
.. versionchanged:: 3.2
In order to avoid inconsistent behavior, TypeError is
raised for >, >=, < and <= comparisons.
The example below illustrates the problem::
assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)
assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)
In the second example one expects approx(0.1).__le__(0.1 + 1e-10)
to be called. But instead, approx(0.1).__lt__(0.1 + 1e-10) is used for
the comparison. This is because the call hierarchy of rich comparisons
follows a fixed behavior. More information: object.__ge__()
Changed in version 3.7.1:
approx raises TypeError when it encounters a dict value or
sequence element of non-numeric type.
Changed in version 6.1.0:
approx falls back to strict equality for non-numeric types instead
of raising TypeError.
def main(
    args: list[str] | os.PathLike[str] | None = None,
    plugins: Sequence[str | _PluggyPlugin] | None = None,
) -> int | ExitCode:
    """Perform an in-process test run.

    :param args:
        List of command line arguments. If `None` or not given, defaults to reading
        arguments directly from the process command line (:data:`sys.argv`).
    :param plugins: List of plugin objects to be auto-registered during initialization.

    :returns: An exit code.
    """
    # Handle a single `--version` argument early to avoid starting up the entire pytest infrastructure.
    new_args = sys.argv[1:] if args is None else args
    if isinstance(new_args, Sequence) and new_args.count("--version") == 1:
        sys.stdout.write(f"pytest {__version__}\n")
        return ExitCode.OK

    # Expose the running pytest version through the environment for the
    # duration of the run; the previous value (if any) is restored below.
    old_pytest_version = os.environ.get("PYTEST_VERSION")
    try:
        os.environ["PYTEST_VERSION"] = __version__
        try:
            config = _prepareconfig(new_args, plugins)
        except ConftestImportFailure as e:
            print_conftest_import_error(e, file=sys.stderr)
            return ExitCode.USAGE_ERROR

        try:
            ret: ExitCode | int = config.hook.pytest_cmdline_main(config=config)
            try:
                # Normalize known exit codes to the ExitCode enum; hooks may
                # return arbitrary ints, which are passed through unchanged.
                return ExitCode(ret)
            except ValueError:
                return ret
        finally:
            # Always tear down the config, even when the hook raises.
            config._ensure_unconfigure()
    except UsageError as e:
        print_usage_error(e, file=sys.stderr)
        return ExitCode.USAGE_ERROR
    finally:
        # Restore the caller's PYTEST_VERSION (or remove ours).
        if old_pytest_version is None:
            os.environ.pop("PYTEST_VERSION", None)
        else:
            os.environ["PYTEST_VERSION"] = old_pytest_version
Perform an in-process test run.
Parameters
- args:
List of command line arguments. If ``None`` or not given, defaults to reading
arguments directly from the process command line (``sys.argv``).
- plugins: List of plugin objects to be auto-registered during initialization.
:returns: An exit code.
def console_main() -> int:
    """The CLI entry point of pytest.

    This function is not meant for programmable use; use `main()` instead.
    """
    # See https://docs.python.org/3/library/signal.html#note-on-sigpipe
    # for why BrokenPipeError requires the special handling below.
    try:
        exit_status = main()
        sys.stdout.flush()
    except BrokenPipeError:
        # Python flushes standard streams on exit; redirect remaining output
        # to devnull to avoid another BrokenPipeError at shutdown
        sink = os.open(os.devnull, os.O_WRONLY)
        os.dup2(sink, sys.stdout.fileno())
        return 1  # Python exits with error code 1 on EPIPE
    else:
        return exit_status
The CLI entry point of pytest.
This function is not meant for programmable use; use main() instead.
def deprecated_call(
    func: Callable[..., Any] | None = None, *args: Any, **kwargs: Any
) -> WarningsRecorder | Any:
    """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning`` or ``FutureWarning``.

    This function can be used as a context manager::

        >>> import warnings
        >>> def api_call_v2():
        ...     warnings.warn('use v3 of this api', DeprecationWarning)
        ...     return 200

        >>> import pytest
        >>> with pytest.deprecated_call():
        ...    assert api_call_v2() == 200

    It can also be used by passing a function and ``*args`` and ``**kwargs``,
    in which case it will ensure calling ``func(*args, **kwargs)`` produces one of
    the warnings types above. The return value is the return value of the function.

    In the context manager form you may use the keyword argument ``match`` to assert
    that the warning matches a text or regex.

    The context manager produces a list of :class:`warnings.WarningMessage` objects,
    one for each warning raised.
    """
    __tracebackhide__ = True
    # The three warning categories that count as "deprecated" here.
    deprecation_categories = (
        DeprecationWarning,
        PendingDeprecationWarning,
        FutureWarning,
    )
    # In the function-call form, the callable becomes the first positional
    # argument forwarded to warns().
    forwarded = args if func is None else (func, *args)
    return warns(deprecation_categories, *forwarded, **kwargs)
Assert that code produces a DeprecationWarning or PendingDeprecationWarning or FutureWarning.
This function can be used as a context manager::
>>> import warnings
>>> def api_call_v2():
... warnings.warn('use v3 of this api', DeprecationWarning)
... return 200
>>> import pytest
>>> with pytest.deprecated_call():
... assert api_call_v2() == 200
It can also be used by passing a function and *args and **kwargs,
in which case it will ensure calling func(*args, **kwargs) produces one of
the warnings types above. The return value is the return value of the function.
In the context manager form you may use the keyword argument match to assert
that the warning matches a text or regex.
The context manager produces a list of warnings.WarningMessage objects,
one for each warning raised.
def fixture(
    fixture_function: FixtureFunction | None = None,
    *,
    scope: _ScopeName | Callable[[str, Config], _ScopeName] = "function",
    params: Iterable[object] | None = None,
    autouse: bool = False,
    ids: Sequence[object | None] | Callable[[Any], object | None] | None = None,
    name: str | None = None,
) -> FixtureFunctionMarker | FixtureFunctionDefinition:
    """Decorator to mark a fixture factory function.

    This decorator can be used, with or without parameters, to define a
    fixture function.

    The name of the fixture function can later be referenced to cause its
    invocation ahead of running tests: test modules or classes can use the
    ``pytest.mark.usefixtures(fixturename)`` marker.

    Test functions can directly use fixture names as input arguments in which
    case the fixture instance returned from the fixture function will be
    injected.

    Fixtures can provide their values to test functions using ``return`` or
    ``yield`` statements. When using ``yield`` the code block after the
    ``yield`` statement is executed as teardown code regardless of the test
    outcome, and must yield exactly once.

    :param scope:
        The scope for which this fixture is shared; one of ``"function"``
        (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``.

        This parameter may also be a callable which receives ``(fixture_name, config)``
        as parameters, and must return a ``str`` with one of the values mentioned above.

        See :ref:`dynamic scope` in the docs for more information.

    :param params:
        An optional list of parameters which will cause multiple invocations
        of the fixture function and all of the tests using it. The current
        parameter is available in ``request.param``.

    :param autouse:
        If True, the fixture func is activated for all tests that can see it.
        If False (the default), an explicit reference is needed to activate
        the fixture.

    :param ids:
        Sequence of ids each corresponding to the params so that they are
        part of the test id. If no ids are provided they will be generated
        automatically from the params.

    :param name:
        The name of the fixture. This defaults to the name of the decorated
        function. If a fixture is used in the same module in which it is
        defined, the function name of the fixture will be shadowed by the
        function arg that requests the fixture; one way to resolve this is to
        name the decorated function ``fixture_<fixturename>`` and then use
        ``@pytest.fixture(name='<fixturename>')``.
    """
    # Normalize params to an immutable tuple (or leave as None).
    if params is None:
        normalized_params = None
    else:
        normalized_params = tuple(params)
    # ids may be None, a callable id-generator, or a sequence; only the
    # sequence form is converted to a tuple.
    if ids is None or callable(ids):
        normalized_ids = ids
    else:
        normalized_ids = tuple(ids)

    marker = FixtureFunctionMarker(
        scope=scope,
        params=normalized_params,
        autouse=autouse,
        ids=normalized_ids,
        name=name,
        _ispytest=True,
    )

    # Direct decoration (`@pytest.fixture` without parentheses) applies the
    # marker immediately; otherwise return it for later application.
    return marker(fixture_function) if fixture_function else marker
Decorator to mark a fixture factory function.
This decorator can be used, with or without parameters, to define a fixture function.
The name of the fixture function can later be referenced to cause its
invocation ahead of running tests: test modules or classes can use the
pytest.mark.usefixtures(fixturename) marker.
Test functions can directly use fixture names as input arguments in which case the fixture instance returned from the fixture function will be injected.
Fixtures can provide their values to test functions using return or
yield statements. When using yield the code block after the
yield statement is executed as teardown code regardless of the test
outcome, and must yield exactly once.
Parameters
- scope: The scope for which this fixture is shared; one of
  ``"function"`` (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``.
  This parameter may also be a callable which receives
  ``(fixture_name, config)`` as parameters, and must return a ``str`` with one of the values mentioned above.
  See :ref:`dynamic scope` in the docs for more information.
- params: An optional list of parameters which will cause multiple invocations of the fixture function and all of the tests using it. The current parameter is available in ``request.param``.
- autouse: If True, the fixture func is activated for all tests that can see it. If False (the default), an explicit reference is needed to activate the fixture.
- ids: Sequence of ids each corresponding to the params so that they are part of the test id. If no ids are provided they will be generated automatically from the params.
- name: The name of the fixture. This defaults to the name of the decorated function. If a fixture is used in the same module in which it is defined, the function name of the fixture will be shadowed by the function arg that requests the fixture; one way to resolve this is to name the decorated function ``fixture_<fixturename>`` and then use ``@pytest.fixture(name='<fixturename>')``.
def freeze_includes() -> list[str]:
    """Return a list of module names used by pytest that should be
    included by cx_freeze."""
    import _pytest

    # Walk the whole _pytest package and collect every module name.
    return list(_iter_all_modules(_pytest))
Return a list of module names used by pytest that should be included by cx_freeze.
200def importorskip( 201 modname: str, 202 minversion: str | None = None, 203 reason: str | None = None, 204 *, 205 exc_type: type[ImportError] | None = None, 206) -> Any: 207 """Import and return the requested module ``modname``, or skip the 208 current test if the module cannot be imported. 209 210 :param modname: 211 The name of the module to import. 212 :param minversion: 213 If given, the imported module's ``__version__`` attribute must be at 214 least this minimal version, otherwise the test is still skipped. 215 :param reason: 216 If given, this reason is shown as the message when the module cannot 217 be imported. 218 :param exc_type: 219 The exception that should be captured in order to skip modules. 220 Must be :py:class:`ImportError` or a subclass. 221 222 If the module can be imported but raises :class:`ImportError`, pytest will 223 issue a warning to the user, as often users expect the module not to be 224 found (which would raise :class:`ModuleNotFoundError` instead). 225 226 This warning can be suppressed by passing ``exc_type=ImportError`` explicitly. 227 228 See :ref:`import-or-skip-import-error` for details. 229 230 231 :returns: 232 The imported module. This should be assigned to its canonical name. 233 234 :raises pytest.skip.Exception: 235 If the module cannot be imported. 236 237 Example:: 238 239 docutils = pytest.importorskip("docutils") 240 241 .. versionadded:: 8.2 242 243 The ``exc_type`` parameter. 244 """ 245 import warnings 246 247 __tracebackhide__ = True 248 compile(modname, "", "eval") # to catch syntaxerrors 249 250 # Until pytest 9.1, we will warn the user if we catch ImportError (instead of ModuleNotFoundError), 251 # as this might be hiding an installation/environment problem, which is not usually what is intended 252 # when using importorskip() (#11523). 253 # In 9.1, to keep the function signature compatible, we just change the code below to: 254 # 1. Use `exc_type = ModuleNotFoundError` if `exc_type` is not given. 255 # 2. 
Remove `warn_on_import` and the warning handling. 256 if exc_type is None: 257 exc_type = ImportError 258 warn_on_import_error = True 259 else: 260 warn_on_import_error = False 261 262 skipped: Skipped | None = None 263 warning: Warning | None = None 264 265 with warnings.catch_warnings(): 266 # Make sure to ignore ImportWarnings that might happen because 267 # of existing directories with the same name we're trying to 268 # import but without a __init__.py file. 269 warnings.simplefilter("ignore") 270 271 try: 272 __import__(modname) 273 except exc_type as exc: 274 # Do not raise or issue warnings inside the catch_warnings() block. 275 if reason is None: 276 reason = f"could not import {modname!r}: {exc}" 277 skipped = Skipped(reason, allow_module_level=True) 278 279 if warn_on_import_error and not isinstance(exc, ModuleNotFoundError): 280 lines = [ 281 "", 282 f"Module '{modname}' was found, but when imported by pytest it raised:", 283 f" {exc!r}", 284 "In pytest 9.1 this warning will become an error by default.", 285 "You can fix the underlying problem, or alternatively overwrite this behavior and silence this " 286 "warning by passing exc_type=ImportError explicitly.", 287 "See https://docs.pytest.org/en/stable/deprecations.html#pytest-importorskip-default-behavior-regarding-importerror", 288 ] 289 warning = PytestDeprecationWarning("\n".join(lines)) 290 291 if warning: 292 warnings.warn(warning, stacklevel=2) 293 if skipped: 294 raise skipped 295 296 mod = sys.modules[modname] 297 if minversion is None: 298 return mod 299 verattr = getattr(mod, "__version__", None) 300 if minversion is not None: 301 # Imported lazily to improve start-up time. 302 from packaging.version import Version 303 304 if verattr is None or Version(verattr) < Version(minversion): 305 raise Skipped( 306 f"module {modname!r} has __version__ {verattr!r}, required is: {minversion!r}", 307 allow_module_level=True, 308 ) 309 return mod
Import and return the requested module modname, or skip the
current test if the module cannot be imported.
Parameters
- modname: The name of the module to import.
- minversion: If given, the imported module's ``__version__`` attribute must be at
  least this minimal version, otherwise the test is still skipped.
- reason: If given, this reason is shown as the message when the module cannot be imported.
- exc_type: The exception that should be captured in order to skip modules. Must be
  ``ImportError`` or a subclass.
  If the module can be imported but raises ``ImportError``, pytest will issue a warning to the user, as often users expect the module not to be found (which would raise ``ModuleNotFoundError`` instead).
  This warning can be suppressed by passing ``exc_type=ImportError`` explicitly.
  See :ref:`import-or-skip-import-error` for details.
:returns: The imported module. This should be assigned to its canonical name.
Raises
- pytest.skip.Exception: If the module cannot be imported.
Example::
docutils = pytest.importorskip("docutils")
New in version 8.2:
The exc_type parameter.
def main(
    args: list[str] | os.PathLike[str] | None = None,
    plugins: Sequence[str | _PluggyPlugin] | None = None,
) -> int | ExitCode:
    """Perform an in-process test run.

    :param args:
        List of command line arguments. If `None` or not given, defaults to reading
        arguments directly from the process command line (:data:`sys.argv`).
    :param plugins: List of plugin objects to be auto-registered during initialization.

    :returns: An exit code.
    """
    # Handle a single `--version` argument early to avoid starting up the entire pytest infrastructure.
    new_args = sys.argv[1:] if args is None else args
    if isinstance(new_args, Sequence) and new_args.count("--version") == 1:
        sys.stdout.write(f"pytest {__version__}\n")
        return ExitCode.OK

    # Expose the running pytest version through the environment for the
    # duration of the run; the previous value (if any) is restored below.
    old_pytest_version = os.environ.get("PYTEST_VERSION")
    try:
        os.environ["PYTEST_VERSION"] = __version__
        try:
            config = _prepareconfig(new_args, plugins)
        except ConftestImportFailure as e:
            print_conftest_import_error(e, file=sys.stderr)
            return ExitCode.USAGE_ERROR

        try:
            ret: ExitCode | int = config.hook.pytest_cmdline_main(config=config)
            try:
                # Normalize known exit codes to the ExitCode enum; hooks may
                # return arbitrary ints, which are passed through unchanged.
                return ExitCode(ret)
            except ValueError:
                return ret
        finally:
            # Always tear down the config, even when the hook raises.
            config._ensure_unconfigure()
    except UsageError as e:
        print_usage_error(e, file=sys.stderr)
        return ExitCode.USAGE_ERROR
    finally:
        # Restore the caller's PYTEST_VERSION (or remove ours).
        if old_pytest_version is None:
            os.environ.pop("PYTEST_VERSION", None)
        else:
            os.environ["PYTEST_VERSION"] = old_pytest_version
Perform an in-process test run.
Parameters
- args:
List of command line arguments. If ``None`` or not given, defaults to reading
arguments directly from the process command line (``sys.argv``).
- plugins: List of plugin objects to be auto-registered during initialization.
:returns: An exit code.
def param(
    *values: object,
    marks: MarkDecorator | Collection[MarkDecorator | Mark] = (),
    id: str | _HiddenParam | None = None,
) -> ParameterSet:
    """Specify a parameter in `pytest.mark.parametrize`_ calls or
    :ref:`parametrized fixtures <fixture-parametrize-marks>`.

    .. code-block:: python

        @pytest.mark.parametrize(
            "test_input,expected",
            [
                ("3+5", 8),
                pytest.param("6*9", 42, marks=pytest.mark.xfail),
            ],
        )
        def test_eval(test_input, expected):
            assert eval(test_input) == expected

    :param values: Variable args of the values of the parameter set, in order.

    :param marks:
        A single mark or a list of marks to be applied to this parameter set.

        :ref:`pytest.mark.usefixtures <pytest.mark.usefixtures ref>` cannot be added via this parameter.

    :type id: str | Literal[pytest.HIDDEN_PARAM] | None
    :param id:
        The id to attribute to this parameter set.

        .. versionadded:: 8.4
            :ref:`hidden-param` means to hide the parameter set
            from the test name. Can only be used at most 1 time, as
            test names need to be unique.
    """
    # All validation and normalization is delegated to ParameterSet.
    parameter_set = ParameterSet.param(*values, marks=marks, id=id)
    return parameter_set
Specify a parameter in pytest.mark.parametrize_ calls or
:ref:parametrized fixtures <fixture-parametrize-marks>.
@pytest.mark.parametrize(
"test_input,expected",
[
("3+5", 8),
pytest.param("6*9", 42, marks=pytest.mark.xfail),
],
)
def test_eval(test_input, expected):
assert eval(test_input) == expected
Parameters
values: Variable args of the values of the parameter set, in order.
marks: A single mark or a list of marks to be applied to this parameter set.
:ref:`pytest.mark.usefixtures <pytest.mark.usefixtures ref>` cannot be added via this parameter.
- id: The id to attribute to this parameter set.
New in version 8.4:
:ref:`hidden-param` means to hide the parameter set
from the test name. Can only be used at most 1 time, as test names need to be unique.
def raises(
    expected_exception: type[E] | tuple[type[E], ...] | None = None,
    *args: Any,
    **kwargs: Any,
) -> RaisesExc[BaseException] | ExceptionInfo[E]:
    r"""Assert that a code block/function call raises an exception type, or one of its subclasses.

    :param expected_exception:
        The expected exception type, or a tuple if one of multiple possible
        exception types are expected. Note that subclasses of the passed exceptions
        will also match.

        This is not a required parameter, you may opt to only use ``match`` and/or
        ``check`` for verifying the raised exception.

    :kwparam str | re.Pattern[str] | None match:
        If specified, a string containing a regular expression,
        or a regular expression object, that is tested against the string
        representation of the exception and its :pep:`678` `__notes__`
        using :func:`re.search`.

        To match a literal string that may contain :ref:`special characters
        <re-syntax>`, the pattern can first be escaped with :func:`re.escape`.

        (This is only used when ``pytest.raises`` is used as a context manager,
        and passed through to the function otherwise.
        When using ``pytest.raises`` as a function, you can use:
        ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)

    :kwparam Callable[[BaseException], bool] check:

        .. versionadded:: 8.4

        If specified, a callable that will be called with the exception as a parameter
        after checking the type and the match regex if specified.
        If it returns ``True`` it will be considered a match, if not it will
        be considered a failed match.


    Use ``pytest.raises`` as a context manager, which will capture the exception of the given
    type, or any of its subclasses::

        >>> import pytest
        >>> with pytest.raises(ZeroDivisionError):
        ...    1/0

    If the code block does not raise the expected exception (:class:`ZeroDivisionError` in the example
    above), or no exception at all, the check will fail instead.

    You can also use the keyword argument ``match`` to assert that the
    exception matches a text or regex::

        >>> with pytest.raises(ValueError, match='must be 0 or None'):
        ...     raise ValueError("value must be 0 or None")

        >>> with pytest.raises(ValueError, match=r'must be \d+$'):
        ...     raise ValueError("value must be 42")

    The ``match`` argument searches the formatted exception string, which includes any
    `PEP-678 <https://peps.python.org/pep-0678/>`__ ``__notes__``:

    >>> with pytest.raises(ValueError, match=r"had a note added"):  # doctest: +SKIP
    ...     e = ValueError("value must be 42")
    ...     e.add_note("had a note added")
    ...     raise e

    The ``check`` argument, if provided, must return True when passed the raised exception
    for the match to be successful, otherwise an :exc:`AssertionError` is raised.

    >>> import errno
    >>> with pytest.raises(OSError, check=lambda e: e.errno == errno.EACCES):
    ...     raise OSError(errno.EACCES, "no permission to view")

    The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
    details of the captured exception::

        >>> with pytest.raises(ValueError) as exc_info:
        ...     raise ValueError("value must be 42")
        >>> assert exc_info.type is ValueError
        >>> assert exc_info.value.args[0] == "value must be 42"

    .. warning::

       Given that ``pytest.raises`` matches subclasses, be wary of using it to match :class:`Exception` like this::

           # Careful, this will catch ANY exception raised.
           with pytest.raises(Exception):
               some_function()

       Because :class:`Exception` is the base class of almost all exceptions, it is easy for this to hide
       real bugs, where the user wrote this expecting a specific exception, but some other exception is being
       raised due to a bug introduced during a refactoring.

       Avoid using ``pytest.raises`` to catch :class:`Exception` unless certain that you really want to catch
       **any** exception raised.

    .. note::

       When using ``pytest.raises`` as a context manager, it's worthwhile to
       note that normal context manager rules apply and that the exception
       raised *must* be the final line in the scope of the context manager.
       Lines of code after that, within the scope of the context manager will
       not be executed. For example::

           >>> value = 15
           >>> with pytest.raises(ValueError) as exc_info:
           ...     if value > 10:
           ...         raise ValueError("value must be <= 10")
           ...     assert exc_info.type is ValueError  # This will not execute.

       Instead, the following approach must be taken (note the difference in
       scope)::

           >>> with pytest.raises(ValueError) as exc_info:
           ...     if value > 10:
           ...         raise ValueError("value must be <= 10")
           ...
           >>> assert exc_info.type is ValueError

    **Expecting exception groups**

    When expecting exceptions wrapped in :exc:`BaseExceptionGroup` or
    :exc:`ExceptionGroup`, you should instead use :class:`pytest.RaisesGroup`.

    **Using with** ``pytest.mark.parametrize``

    When using :ref:`pytest.mark.parametrize ref`
    it is possible to parametrize tests such that
    some runs raise an exception and others do not.

    See :ref:`parametrizing_conditional_raising` for an example.

    .. seealso::

        :ref:`assertraises` for more examples and detailed discussion.

    **Legacy form**

    It is possible to specify a callable by passing a to-be-called lambda::

        >>> raises(ZeroDivisionError, lambda: 1/0)
        <ExceptionInfo ...>

    or you can specify an arbitrary callable with arguments::

        >>> def f(x): return 1/x
        ...
        >>> raises(ZeroDivisionError, f, 0)
        <ExceptionInfo ...>
        >>> raises(ZeroDivisionError, f, x=0)
        <ExceptionInfo ...>

    The form above is fully supported but discouraged for new code because the
    context manager form is regarded as more readable and less error-prone.

    .. note::
        Similar to caught exception objects in Python, explicitly clearing
        local references to returned ``ExceptionInfo`` objects can
        help the Python interpreter speed up its garbage collection.

        Clearing those references breaks a reference cycle
        (``ExceptionInfo`` --> caught exception --> frame stack raising
        the exception --> current frame stack --> local variables -->
        ``ExceptionInfo``) which makes Python keep all objects referenced
        from that cycle (including all local variables in the current
        frame) alive until the next cyclic garbage collection run.
        More detailed information can be found in the official Python
        documentation for :ref:`the try statement <python:try>`.
    """
    __tracebackhide__ = True

    # Context-manager form: no positional arguments given, so return a
    # RaisesExc configured from the keyword arguments.
    if not args:
        # Reject unknown keyword arguments with a hint toward the CM form.
        if set(kwargs) - {"match", "check", "expected_exception"}:
            msg = "Unexpected keyword arguments passed to pytest.raises: "
            msg += ", ".join(sorted(kwargs))
            msg += "\nUse context-manager form instead?"
            raise TypeError(msg)

        if expected_exception is None:
            return RaisesExc(**kwargs)
        return RaisesExc(expected_exception, **kwargs)

    # Legacy form: raises(Exc, func, *args, **kwargs) calls func immediately.
    # An empty/falsy expected_exception (e.g. ()) is a usage error here.
    if not expected_exception:
        raise ValueError(
            f"Expected an exception type or a tuple of exception types, but got `{expected_exception!r}`. "
            f"Raising exceptions is already understood as failing the test, so you don't need "
            f"any special code to say 'this should never raise an exception'."
        )
    func = args[0]
    if not callable(func):
        raise TypeError(f"{func!r} object (type: {type(func)}) must be callable")
    with RaisesExc(expected_exception) as excinfo:
        func(*args[1:], **kwargs)
    try:
        return excinfo
    finally:
        # Drop the local reference to break the ExceptionInfo -> frame ->
        # locals reference cycle described in the docstring's final note.
        del excinfo
Assert that a code block/function call raises an exception type, or one of its subclasses.
Parameters
expected_exception: The expected exception type, or a tuple if one of multiple possible exception types are expected. Note that subclasses of the passed exceptions will also match.
This is not a required parameter, you may opt to only use
matchand/orcheckfor verifying the raised exception.
:kwparam str | re.Pattern[str] | None match:
If specified, a string containing a regular expression,
or a regular expression object, that is tested against the string
representation of the exception and its :pep:678 __notes__
using re.search().
To match a literal string that may contain :ref:`special characters
<re-syntax>`, the pattern can first be escaped with `re.escape()`.
(This is only used when ``pytest.raises`` is used as a context manager,
and passed through to the function otherwise.
When using ``pytest.raises`` as a function, you can use:
``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)
:kwparam Callable[[BaseException], bool] check:
*New in version 8.4.*
If specified, a callable that will be called with the exception as a parameter
after checking the type and the match regex if specified.
If it returns ``True`` it will be considered a match, if not it will
be considered a failed match.
Use pytest.raises as a context manager, which will capture the exception of the given
type, or any of its subclasses::
>>> import pytest
>>> with pytest.raises(ZeroDivisionError):
... 1/0
If the code block does not raise the expected exception (ZeroDivisionError in the example
above), or no exception at all, the check will fail instead.
You can also use the keyword argument match to assert that the
exception matches a text or regex::
>>> with pytest.raises(ValueError, match='must be 0 or None'):
... raise ValueError("value must be 0 or None")
>>> with pytest.raises(ValueError, match=r'must be \d+$'):
... raise ValueError("value must be 42")
The match argument searches the formatted exception string, which includes any
`PEP-678 <https://peps.python.org/pep-0678/>`__ ``__notes__``:
>>> with pytest.raises(ValueError, match=r"had a note added"): # doctest: +SKIP
... e = ValueError("value must be 42")
... e.add_note("had a note added")
... raise e
The check argument, if provided, must return True when passed the raised exception
for the match to be successful, otherwise an AssertionError is raised.
>>> import errno
>>> with pytest.raises(OSError, check=lambda e: e.errno == errno.EACCES):
... raise OSError(errno.EACCES, "no permission to view")
The context manager produces an ExceptionInfo object which can be used to inspect the
details of the captured exception::
>>> with pytest.raises(ValueError) as exc_info:
... raise ValueError("value must be 42")
>>> assert exc_info.type is ValueError
>>> assert exc_info.value.args[0] == "value must be 42"
Given that pytest.raises matches subclasses, be wary of using it to match Exception like this::
# Careful, this will catch ANY exception raised.
with pytest.raises(Exception):
some_function()
Because Exception is the base class of almost all exceptions, it is easy for this to hide
real bugs, where the user wrote this expecting a specific exception, but some other exception is being
raised due to a bug introduced during a refactoring.
Avoid using pytest.raises to catch Exception unless certain that you really want to catch
any exception raised.
When using pytest.raises as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised must be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager will
not be executed. For example::
>>> value = 15
>>> with pytest.raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
... assert exc_info.type is ValueError # This will not execute.
Instead, the following approach must be taken (note the difference in scope)::
>>> with pytest.raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
...
>>> assert exc_info.type is ValueError
Expecting exception groups
When expecting exceptions wrapped in BaseExceptionGroup or
ExceptionGroup, you should instead use pytest.RaisesGroup.
Using with pytest.mark.parametrize
When using :ref:pytest.mark.parametrize ref
it is possible to parametrize tests such that
some runs raise an exception and others do not.
See :ref:`parametrizing_conditional_raising` for an example.
.. seealso:: :ref:`assertraises`
Legacy form
It is possible to specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
The form above is fully supported but discouraged for new code because the context manager form is regarded as more readable and less error-prone.
Similar to caught exception objects in Python, explicitly clearing
local references to returned ExceptionInfo objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(ExceptionInfo --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
ExceptionInfo) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run.
More detailed information can be found in the official Python
documentation for :ref:`the try statement <python:try>`.
72def register_assert_rewrite(*names: str) -> None: 73 """Register one or more module names to be rewritten on import. 74 75 This function will make sure that this module or all modules inside 76 the package will get their assert statements rewritten. 77 Thus you should make sure to call this before the module is 78 actually imported, usually in your __init__.py if you are a plugin 79 using a package. 80 81 :param names: The module names to register. 82 """ 83 for name in names: 84 if not isinstance(name, str): 85 msg = "expected module names as *args, got {0} instead" # type: ignore[unreachable] 86 raise TypeError(msg.format(repr(names))) 87 rewrite_hook: RewriteHook 88 for hook in sys.meta_path: 89 if isinstance(hook, rewrite.AssertionRewritingHook): 90 rewrite_hook = hook 91 break 92 else: 93 rewrite_hook = DummyRewriteHook() 94 rewrite_hook.mark_rewrite(*names)
Register one or more module names to be rewritten on import.
This function will make sure that this module or all modules inside the package will get their assert statements rewritten. Thus you should make sure to call this before the module is actually imported, usually in your __init__.py if you are a plugin using a package.
Parameters
- names: The module names to register.
280 @classmethod 281 def set_trace(cls, *args, **kwargs) -> None: 282 """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" 283 frame = sys._getframe().f_back 284 _pdb = cls._init_pdb("set_trace", *args, **kwargs) 285 _pdb.set_trace(frame)
Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.
107def warns( 108 expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning, 109 *args: Any, 110 match: str | re.Pattern[str] | None = None, 111 **kwargs: Any, 112) -> WarningsChecker | Any: 113 r"""Assert that code raises a particular class of warning. 114 115 Specifically, the parameter ``expected_warning`` can be a warning class or tuple 116 of warning classes, and the code inside the ``with`` block must issue at least one 117 warning of that class or classes. 118 119 This helper produces a list of :class:`warnings.WarningMessage` objects, one for 120 each warning emitted (regardless of whether it is an ``expected_warning`` or not). 121 Since pytest 8.0, unmatched warnings are also re-emitted when the context closes. 122 123 This function can be used as a context manager:: 124 125 >>> import pytest 126 >>> with pytest.warns(RuntimeWarning): 127 ... warnings.warn("my warning", RuntimeWarning) 128 129 In the context manager form you may use the keyword argument ``match`` to assert 130 that the warning matches a text or regex:: 131 132 >>> with pytest.warns(UserWarning, match='must be 0 or None'): 133 ... warnings.warn("value must be 0 or None", UserWarning) 134 135 >>> with pytest.warns(UserWarning, match=r'must be \d+$'): 136 ... warnings.warn("value must be 42", UserWarning) 137 138 >>> with pytest.warns(UserWarning): # catch re-emitted warning 139 ... with pytest.warns(UserWarning, match=r'must be \d+$'): 140 ... warnings.warn("this is not here", UserWarning) 141 Traceback (most recent call last): 142 ... 143 Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted... 144 145 **Using with** ``pytest.mark.parametrize`` 146 147 When using :ref:`pytest.mark.parametrize ref` it is possible to parametrize tests 148 such that some runs raise a warning and others do not. 149 150 This could be achieved in the same way as with exceptions, see 151 :ref:`parametrizing_conditional_raising` for an example. 
152 153 """ 154 __tracebackhide__ = True 155 if not args: 156 if kwargs: 157 argnames = ", ".join(sorted(kwargs)) 158 raise TypeError( 159 f"Unexpected keyword arguments passed to pytest.warns: {argnames}" 160 "\nUse context-manager form instead?" 161 ) 162 return WarningsChecker(expected_warning, match_expr=match, _ispytest=True) 163 else: 164 func = args[0] 165 if not callable(func): 166 raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") 167 with WarningsChecker(expected_warning, _ispytest=True): 168 return func(*args[1:], **kwargs)
Assert that code raises a particular class of warning.
Specifically, the parameter expected_warning can be a warning class or tuple
of warning classes, and the code inside the with block must issue at least one
warning of that class or classes.
This helper produces a list of warnings.WarningMessage objects, one for
each warning emitted (regardless of whether it is an expected_warning or not).
Since pytest 8.0, unmatched warnings are also re-emitted when the context closes.
This function can be used as a context manager::
>>> import pytest
>>> with pytest.warns(RuntimeWarning):
... warnings.warn("my warning", RuntimeWarning)
In the context manager form you may use the keyword argument match to assert
that the warning matches a text or regex::
>>> with pytest.warns(UserWarning, match='must be 0 or None'):
... warnings.warn("value must be 0 or None", UserWarning)
>>> with pytest.warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("value must be 42", UserWarning)
>>> with pytest.warns(UserWarning): # catch re-emitted warning
... with pytest.warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("this is not here", UserWarning)
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted...
Using with pytest.mark.parametrize
When using :ref:`pytest.mark.parametrize ref` it is possible to parametrize tests
such that some runs raise a warning and others do not.
This could be achieved in the same way as with exceptions, see
:ref:`parametrizing_conditional_raising` for an example.
1405def yield_fixture( 1406 fixture_function=None, 1407 *args, 1408 scope="function", 1409 params=None, 1410 autouse=False, 1411 ids=None, 1412 name=None, 1413): 1414 """(Return a) decorator to mark a yield-fixture factory function. 1415 1416 .. deprecated:: 3.0 1417 Use :py:func:`pytest.fixture` directly instead. 1418 """ 1419 warnings.warn(YIELD_FIXTURE, stacklevel=2) 1420 return fixture( 1421 fixture_function, 1422 *args, 1423 scope=scope, 1424 params=params, 1425 autouse=autouse, 1426 ids=ids, 1427 name=name, 1428 )
(Return a) decorator to mark a yield-fixture factory function.
Deprecated since version 3.0:
Use pytest.fixture() directly instead.