jinja2.ext
Extension API for adding custom tags and behavior.
1"""Extension API for adding custom tags and behavior.""" 2 3import pprint 4import re 5import typing as t 6 7from markupsafe import Markup 8 9from . import defaults 10from . import nodes 11from .environment import Environment 12from .exceptions import TemplateAssertionError 13from .exceptions import TemplateSyntaxError 14from .runtime import concat # type: ignore 15from .runtime import Context 16from .runtime import Undefined 17from .utils import import_string 18from .utils import pass_context 19 20if t.TYPE_CHECKING: 21 import typing_extensions as te 22 23 from .lexer import Token 24 from .lexer import TokenStream 25 from .parser import Parser 26 27 class _TranslationsBasic(te.Protocol): 28 def gettext(self, message: str) -> str: ... 29 30 def ngettext(self, singular: str, plural: str, n: int) -> str: 31 pass 32 33 class _TranslationsContext(_TranslationsBasic): 34 def pgettext(self, context: str, message: str) -> str: ... 35 36 def npgettext( 37 self, context: str, singular: str, plural: str, n: int 38 ) -> str: ... 39 40 _SupportedTranslations = t.Union[_TranslationsBasic, _TranslationsContext] 41 42 43# I18N functions available in Jinja templates. If the I18N library 44# provides ugettext, it will be assigned to gettext. 45GETTEXT_FUNCTIONS: t.Tuple[str, ...] = ( 46 "_", 47 "gettext", 48 "ngettext", 49 "pgettext", 50 "npgettext", 51) 52_ws_re = re.compile(r"\s*\n\s*") 53 54 55class Extension: 56 """Extensions can be used to add extra functionality to the Jinja template 57 system at the parser level. Custom extensions are bound to an environment 58 but may not store environment specific data on `self`. The reason for 59 this is that an extension can be bound to another environment (for 60 overlays) by creating a copy and reassigning the `environment` attribute. 61 62 As extensions are created by the environment they cannot accept any 63 arguments for configuration. One may want to work around that by using 64 a factory function, but that is not possible as extensions are identified 65 by their import name. The correct way to configure the extension is 66 storing the configuration values on the environment. Because this way the 67 environment ends up acting as central configuration storage the 68 attributes may clash which is why extensions have to ensure that the names 69 they choose for configuration are not too generic. ``prefix`` for example 70 is a terrible name, ``fragment_cache_prefix`` on the other hand is a good 71 name as includes the name of the extension (fragment cache). 72 """ 73 74 identifier: t.ClassVar[str] 75 76 def __init_subclass__(cls) -> None: 77 cls.identifier = f"{cls.__module__}.{cls.__name__}" 78 79 #: if this extension parses this is the list of tags it's listening to. 80 tags: t.Set[str] = set() 81 82 #: the priority of that extension. This is especially useful for 83 #: extensions that preprocess values. A lower value means higher 84 #: priority. 85 #: 86 #: .. versionadded:: 2.4 87 priority = 100 88 89 def __init__(self, environment: Environment) -> None: 90 self.environment = environment 91 92 def bind(self, environment: Environment) -> "te.Self": 93 """Create a copy of this extension bound to another environment.""" 94 rv = object.__new__(self.__class__) 95 rv.__dict__.update(self.__dict__) 96 rv.environment = environment 97 return rv 98 99 def preprocess( 100 self, source: str, name: t.Optional[str], filename: t.Optional[str] = None 101 ) -> str: 102 """This method is called before the actual lexing and can be used to 103 preprocess the source. 
The `filename` is optional. The return value 104 must be the preprocessed source. 105 """ 106 return source 107 108 def filter_stream( 109 self, stream: "TokenStream" 110 ) -> t.Union["TokenStream", t.Iterable["Token"]]: 111 """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used 112 to filter tokens returned. This method has to return an iterable of 113 :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a 114 :class:`~jinja2.lexer.TokenStream`. 115 """ 116 return stream 117 118 def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]: 119 """If any of the :attr:`tags` matched this method is called with the 120 parser as first argument. The token the parser stream is pointing at 121 is the name token that matched. This method has to return one or a 122 list of multiple nodes. 123 """ 124 raise NotImplementedError() 125 126 def attr( 127 self, name: str, lineno: t.Optional[int] = None 128 ) -> nodes.ExtensionAttribute: 129 """Return an attribute node for the current extension. This is useful 130 to pass constants on extensions to generated template code. 131 132 :: 133 134 self.attr('_my_attribute', lineno=lineno) 135 """ 136 return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno) 137 138 def call_method( 139 self, 140 name: str, 141 args: t.Optional[t.List[nodes.Expr]] = None, 142 kwargs: t.Optional[t.List[nodes.Keyword]] = None, 143 dyn_args: t.Optional[nodes.Expr] = None, 144 dyn_kwargs: t.Optional[nodes.Expr] = None, 145 lineno: t.Optional[int] = None, 146 ) -> nodes.Call: 147 """Call a method of the extension. This is a shortcut for 148 :meth:`attr` + :class:`jinja2.nodes.Call`. 149 """ 150 if args is None: 151 args = [] 152 if kwargs is None: 153 kwargs = [] 154 return nodes.Call( 155 self.attr(name, lineno=lineno), 156 args, 157 kwargs, 158 dyn_args, 159 dyn_kwargs, 160 lineno=lineno, 161 ) 162 163 164@pass_context 165def _gettext_alias( 166 __context: Context, *args: t.Any, **kwargs: t.Any 167) -> t.Union[t.Any, Undefined]: 168 return __context.call(__context.resolve("gettext"), *args, **kwargs) 169 170 171def _make_new_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]: 172 @pass_context 173 def gettext(__context: Context, __string: str, **variables: t.Any) -> str: 174 rv = __context.call(func, __string) 175 if __context.eval_ctx.autoescape: 176 rv = Markup(rv) 177 # Always treat as a format string, even if there are no 178 # variables. This makes translation strings more consistent 179 # and predictable. This requires escaping 180 return rv % variables # type: ignore 181 182 return gettext 183 184 185def _make_new_ngettext(func: t.Callable[[str, str, int], str]) -> t.Callable[..., str]: 186 @pass_context 187 def ngettext( 188 __context: Context, 189 __singular: str, 190 __plural: str, 191 __num: int, 192 **variables: t.Any, 193 ) -> str: 194 variables.setdefault("num", __num) 195 rv = __context.call(func, __singular, __plural, __num) 196 if __context.eval_ctx.autoescape: 197 rv = Markup(rv) 198 # Always treat as a format string, see gettext comment above. 
199 return rv % variables # type: ignore 200 201 return ngettext 202 203 204def _make_new_pgettext(func: t.Callable[[str, str], str]) -> t.Callable[..., str]: 205 @pass_context 206 def pgettext( 207 __context: Context, __string_ctx: str, __string: str, **variables: t.Any 208 ) -> str: 209 variables.setdefault("context", __string_ctx) 210 rv = __context.call(func, __string_ctx, __string) 211 212 if __context.eval_ctx.autoescape: 213 rv = Markup(rv) 214 215 # Always treat as a format string, see gettext comment above. 216 return rv % variables # type: ignore 217 218 return pgettext 219 220 221def _make_new_npgettext( 222 func: t.Callable[[str, str, str, int], str], 223) -> t.Callable[..., str]: 224 @pass_context 225 def npgettext( 226 __context: Context, 227 __string_ctx: str, 228 __singular: str, 229 __plural: str, 230 __num: int, 231 **variables: t.Any, 232 ) -> str: 233 variables.setdefault("context", __string_ctx) 234 variables.setdefault("num", __num) 235 rv = __context.call(func, __string_ctx, __singular, __plural, __num) 236 237 if __context.eval_ctx.autoescape: 238 rv = Markup(rv) 239 240 # Always treat as a format string, see gettext comment above. 241 return rv % variables # type: ignore 242 243 return npgettext 244 245 246class InternationalizationExtension(Extension): 247 """This extension adds gettext support to Jinja.""" 248 249 tags = {"trans"} 250 251 # TODO: the i18n extension is currently reevaluating values in a few 252 # situations. Take this example: 253 # {% trans count=something() %}{{ count }} foo{% pluralize 254 # %}{{ count }} fooss{% endtrans %} 255 # something is called twice here. One time for the gettext value and 256 # the other time for the n-parameter of the ngettext function. 257 258 def __init__(self, environment: Environment) -> None: 259 super().__init__(environment) 260 environment.globals["_"] = _gettext_alias 261 environment.extend( 262 install_gettext_translations=self._install, 263 install_null_translations=self._install_null, 264 install_gettext_callables=self._install_callables, 265 uninstall_gettext_translations=self._uninstall, 266 extract_translations=self._extract, 267 newstyle_gettext=False, 268 ) 269 270 def _install( 271 self, translations: "_SupportedTranslations", newstyle: t.Optional[bool] = None 272 ) -> None: 273 # ugettext and ungettext are preferred in case the I18N library 274 # is providing compatibility with older Python versions. 
275 gettext = getattr(translations, "ugettext", None) 276 if gettext is None: 277 gettext = translations.gettext 278 ngettext = getattr(translations, "ungettext", None) 279 if ngettext is None: 280 ngettext = translations.ngettext 281 282 pgettext = getattr(translations, "pgettext", None) 283 npgettext = getattr(translations, "npgettext", None) 284 self._install_callables( 285 gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext 286 ) 287 288 def _install_null(self, newstyle: t.Optional[bool] = None) -> None: 289 import gettext 290 291 translations = gettext.NullTranslations() 292 293 if hasattr(translations, "pgettext"): 294 # Python < 3.8 295 pgettext = translations.pgettext 296 else: 297 298 def pgettext(c: str, s: str) -> str: # type: ignore[misc] 299 return s 300 301 if hasattr(translations, "npgettext"): 302 npgettext = translations.npgettext 303 else: 304 305 def npgettext(c: str, s: str, p: str, n: int) -> str: # type: ignore[misc] 306 return s if n == 1 else p 307 308 self._install_callables( 309 gettext=translations.gettext, 310 ngettext=translations.ngettext, 311 newstyle=newstyle, 312 pgettext=pgettext, 313 npgettext=npgettext, 314 ) 315 316 def _install_callables( 317 self, 318 gettext: t.Callable[[str], str], 319 ngettext: t.Callable[[str, str, int], str], 320 newstyle: t.Optional[bool] = None, 321 pgettext: t.Optional[t.Callable[[str, str], str]] = None, 322 npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None, 323 ) -> None: 324 if newstyle is not None: 325 self.environment.newstyle_gettext = newstyle # type: ignore 326 if self.environment.newstyle_gettext: # type: ignore 327 gettext = _make_new_gettext(gettext) 328 ngettext = _make_new_ngettext(ngettext) 329 330 if pgettext is not None: 331 pgettext = _make_new_pgettext(pgettext) 332 333 if npgettext is not None: 334 npgettext = _make_new_npgettext(npgettext) 335 336 self.environment.globals.update( 337 gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext 338 ) 339 340 def _uninstall(self, translations: "_SupportedTranslations") -> None: 341 for key in ("gettext", "ngettext", "pgettext", "npgettext"): 342 self.environment.globals.pop(key, None) 343 344 def _extract( 345 self, 346 source: t.Union[str, nodes.Template], 347 gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS, 348 ) -> t.Iterator[ 349 t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]] 350 ]: 351 if isinstance(source, str): 352 source = self.environment.parse(source) 353 return extract_from_ast(source, gettext_functions) 354 355 def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]: 356 """Parse a translatable tag.""" 357 lineno = next(parser.stream).lineno 358 359 context = None 360 context_token = parser.stream.next_if("string") 361 362 if context_token is not None: 363 context = context_token.value 364 365 # find all the variables referenced. Additionally a variable can be 366 # defined in the body of the trans block too, but this is checked at 367 # a later state. 
368 plural_expr: t.Optional[nodes.Expr] = None 369 plural_expr_assignment: t.Optional[nodes.Assign] = None 370 num_called_num = False 371 variables: t.Dict[str, nodes.Expr] = {} 372 trimmed = None 373 while parser.stream.current.type != "block_end": 374 if variables: 375 parser.stream.expect("comma") 376 377 # skip colon for python compatibility 378 if parser.stream.skip_if("colon"): 379 break 380 381 token = parser.stream.expect("name") 382 if token.value in variables: 383 parser.fail( 384 f"translatable variable {token.value!r} defined twice.", 385 token.lineno, 386 exc=TemplateAssertionError, 387 ) 388 389 # expressions 390 if parser.stream.current.type == "assign": 391 next(parser.stream) 392 variables[token.value] = var = parser.parse_expression() 393 elif trimmed is None and token.value in ("trimmed", "notrimmed"): 394 trimmed = token.value == "trimmed" 395 continue 396 else: 397 variables[token.value] = var = nodes.Name(token.value, "load") 398 399 if plural_expr is None: 400 if isinstance(var, nodes.Call): 401 plural_expr = nodes.Name("_trans", "load") 402 variables[token.value] = plural_expr 403 plural_expr_assignment = nodes.Assign( 404 nodes.Name("_trans", "store"), var 405 ) 406 else: 407 plural_expr = var 408 num_called_num = token.value == "num" 409 410 parser.stream.expect("block_end") 411 412 plural = None 413 have_plural = False 414 referenced = set() 415 416 # now parse until endtrans or pluralize 417 singular_names, singular = self._parse_block(parser, True) 418 if singular_names: 419 referenced.update(singular_names) 420 if plural_expr is None: 421 plural_expr = nodes.Name(singular_names[0], "load") 422 num_called_num = singular_names[0] == "num" 423 424 # if we have a pluralize block, we parse that too 425 if parser.stream.current.test("name:pluralize"): 426 have_plural = True 427 next(parser.stream) 428 if parser.stream.current.type != "block_end": 429 token = parser.stream.expect("name") 430 if token.value not in variables: 431 parser.fail( 432 f"unknown variable {token.value!r} for pluralization", 433 token.lineno, 434 exc=TemplateAssertionError, 435 ) 436 plural_expr = variables[token.value] 437 num_called_num = token.value == "num" 438 parser.stream.expect("block_end") 439 plural_names, plural = self._parse_block(parser, False) 440 next(parser.stream) 441 referenced.update(plural_names) 442 else: 443 next(parser.stream) 444 445 # register free names as simple name expressions 446 for name in referenced: 447 if name not in variables: 448 variables[name] = nodes.Name(name, "load") 449 450 if not have_plural: 451 plural_expr = None 452 elif plural_expr is None: 453 parser.fail("pluralize without variables", lineno) 454 455 if trimmed is None: 456 trimmed = self.environment.policies["ext.i18n.trimmed"] 457 if trimmed: 458 singular = self._trim_whitespace(singular) 459 if plural: 460 plural = self._trim_whitespace(plural) 461 462 node = self._make_node( 463 singular, 464 plural, 465 context, 466 variables, 467 plural_expr, 468 bool(referenced), 469 num_called_num and have_plural, 470 ) 471 node.set_lineno(lineno) 472 if plural_expr_assignment is not None: 473 return [plural_expr_assignment, node] 474 else: 475 return node 476 477 def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str: 478 return _ws_re.sub(" ", string.strip()) 479 480 def _parse_block( 481 self, parser: "Parser", allow_pluralize: bool 482 ) -> t.Tuple[t.List[str], str]: 483 """Parse until the next block tag with a given name.""" 484 referenced = [] 485 buf = [] 486 487 while 
True: 488 if parser.stream.current.type == "data": 489 buf.append(parser.stream.current.value.replace("%", "%%")) 490 next(parser.stream) 491 elif parser.stream.current.type == "variable_begin": 492 next(parser.stream) 493 name = parser.stream.expect("name").value 494 referenced.append(name) 495 buf.append(f"%({name})s") 496 parser.stream.expect("variable_end") 497 elif parser.stream.current.type == "block_begin": 498 next(parser.stream) 499 block_name = ( 500 parser.stream.current.value 501 if parser.stream.current.type == "name" 502 else None 503 ) 504 if block_name == "endtrans": 505 break 506 elif block_name == "pluralize": 507 if allow_pluralize: 508 break 509 parser.fail( 510 "a translatable section can have only one pluralize section" 511 ) 512 elif block_name == "trans": 513 parser.fail( 514 "trans blocks can't be nested; did you mean `endtrans`?" 515 ) 516 parser.fail( 517 f"control structures in translatable sections are not allowed; " 518 f"saw `{block_name}`" 519 ) 520 elif parser.stream.eos: 521 parser.fail("unclosed translation block") 522 else: 523 raise RuntimeError("internal parser error") 524 525 return referenced, concat(buf) 526 527 def _make_node( 528 self, 529 singular: str, 530 plural: t.Optional[str], 531 context: t.Optional[str], 532 variables: t.Dict[str, nodes.Expr], 533 plural_expr: t.Optional[nodes.Expr], 534 vars_referenced: bool, 535 num_called_num: bool, 536 ) -> nodes.Output: 537 """Generates a useful node from the data provided.""" 538 newstyle = self.environment.newstyle_gettext # type: ignore 539 node: nodes.Expr 540 541 # no variables referenced? no need to escape for old style 542 # gettext invocations only if there are vars. 543 if not vars_referenced and not newstyle: 544 singular = singular.replace("%%", "%") 545 if plural: 546 plural = plural.replace("%%", "%") 547 548 func_name = "gettext" 549 func_args: t.List[nodes.Expr] = [nodes.Const(singular)] 550 551 if context is not None: 552 func_args.insert(0, nodes.Const(context)) 553 func_name = f"p{func_name}" 554 555 if plural_expr is not None: 556 func_name = f"n{func_name}" 557 func_args.extend((nodes.Const(plural), plural_expr)) 558 559 node = nodes.Call(nodes.Name(func_name, "load"), func_args, [], None, None) 560 561 # in case newstyle gettext is used, the method is powerful 562 # enough to handle the variable expansion and autoescape 563 # handling itself 564 if newstyle: 565 for key, value in variables.items(): 566 # the function adds that later anyways in case num was 567 # called num, so just skip it. 568 if num_called_num and key == "num": 569 continue 570 node.kwargs.append(nodes.Keyword(key, value)) 571 572 # otherwise do that here 573 else: 574 # mark the return value as safe if we are in an 575 # environment with autoescaping turned on 576 node = nodes.MarkSafeIfAutoescape(node) 577 if variables: 578 node = nodes.Mod( 579 node, 580 nodes.Dict( 581 [ 582 nodes.Pair(nodes.Const(key), value) 583 for key, value in variables.items() 584 ] 585 ), 586 ) 587 return nodes.Output([node]) 588 589 590class ExprStmtExtension(Extension): 591 """Adds a `do` tag to Jinja that works like the print statement just 592 that it doesn't print the return value. 
593 """ 594 595 tags = {"do"} 596 597 def parse(self, parser: "Parser") -> nodes.ExprStmt: 598 node = nodes.ExprStmt(lineno=next(parser.stream).lineno) 599 node.node = parser.parse_tuple() 600 return node 601 602 603class LoopControlExtension(Extension): 604 """Adds break and continue to the template engine.""" 605 606 tags = {"break", "continue"} 607 608 def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]: 609 token = next(parser.stream) 610 if token.value == "break": 611 return nodes.Break(lineno=token.lineno) 612 return nodes.Continue(lineno=token.lineno) 613 614 615class DebugExtension(Extension): 616 """A ``{% debug %}`` tag that dumps the available variables, 617 filters, and tests. 618 619 .. code-block:: html+jinja 620 621 <pre>{% debug %}</pre> 622 623 .. code-block:: text 624 625 {'context': {'cycler': <class 'jinja2.utils.Cycler'>, 626 ..., 627 'namespace': <class 'jinja2.utils.Namespace'>}, 628 'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd', 629 ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'], 630 'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined', 631 ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']} 632 633 .. versionadded:: 2.11.0 634 """ 635 636 tags = {"debug"} 637 638 def parse(self, parser: "Parser") -> nodes.Output: 639 lineno = parser.stream.expect("name:debug").lineno 640 context = nodes.ContextReference() 641 result = self.call_method("_render", [context], lineno=lineno) 642 return nodes.Output([result], lineno=lineno) 643 644 def _render(self, context: Context) -> str: 645 result = { 646 "context": context.get_all(), 647 "filters": sorted(self.environment.filters.keys()), 648 "tests": sorted(self.environment.tests.keys()), 649 } 650 651 # Set the depth since the intent is to show the top few names. 652 return pprint.pformat(result, depth=3, compact=True) 653 654 655def extract_from_ast( 656 ast: nodes.Template, 657 gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS, 658 babel_style: bool = True, 659) -> t.Iterator[ 660 t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]] 661]: 662 """Extract localizable strings from the given template node. Per 663 default this function returns matches in babel style that means non string 664 parameters as well as keyword arguments are returned as `None`. This 665 allows Babel to figure out what you really meant if you are using 666 gettext functions that allow keyword arguments for placeholder expansion. 667 If you don't want that behavior set the `babel_style` parameter to `False` 668 which causes only strings to be returned and parameters are always stored 669 in tuples. As a consequence invalid gettext calls (calls without a single 670 string parameter or string parameters after non-string parameters) are 671 skipped. 
672 673 This example explains the behavior: 674 675 >>> from jinja2 import Environment 676 >>> env = Environment() 677 >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}') 678 >>> list(extract_from_ast(node)) 679 [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))] 680 >>> list(extract_from_ast(node, babel_style=False)) 681 [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))] 682 683 For every string found this function yields a ``(lineno, function, 684 message)`` tuple, where: 685 686 * ``lineno`` is the number of the line on which the string was found, 687 * ``function`` is the name of the ``gettext`` function used (if the 688 string was extracted from embedded Python code), and 689 * ``message`` is the string, or a tuple of strings for functions 690 with multiple string arguments. 691 692 This extraction function operates on the AST and is because of that unable 693 to extract any comments. For comment support you have to use the babel 694 extraction interface or extract comments yourself. 695 """ 696 out: t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]] 697 698 for node in ast.find_all(nodes.Call): 699 if ( 700 not isinstance(node.node, nodes.Name) 701 or node.node.name not in gettext_functions 702 ): 703 continue 704 705 strings: t.List[t.Optional[str]] = [] 706 707 for arg in node.args: 708 if isinstance(arg, nodes.Const) and isinstance(arg.value, str): 709 strings.append(arg.value) 710 else: 711 strings.append(None) 712 713 for _ in node.kwargs: 714 strings.append(None) 715 if node.dyn_args is not None: 716 strings.append(None) 717 if node.dyn_kwargs is not None: 718 strings.append(None) 719 720 if not babel_style: 721 out = tuple(x for x in strings if x is not None) 722 723 if not out: 724 continue 725 else: 726 if len(strings) == 1: 727 out = strings[0] 728 else: 729 out = tuple(strings) 730 731 yield node.lineno, node.node.name, out 732 733 734class _CommentFinder: 735 """Helper class to find comments in a token stream. Can only 736 find comments for gettext calls forwards. Once the comment 737 from line 4 is found, a comment for line 1 will not return a 738 usable value. 739 """ 740 741 def __init__( 742 self, tokens: t.Sequence[t.Tuple[int, str, str]], comment_tags: t.Sequence[str] 743 ) -> None: 744 self.tokens = tokens 745 self.comment_tags = comment_tags 746 self.offset = 0 747 self.last_lineno = 0 748 749 def find_backwards(self, offset: int) -> t.List[str]: 750 try: 751 for _, token_type, token_value in reversed( 752 self.tokens[self.offset : offset] 753 ): 754 if token_type in ("comment", "linecomment"): 755 try: 756 prefix, comment = token_value.split(None, 1) 757 except ValueError: 758 continue 759 if prefix in self.comment_tags: 760 return [comment.rstrip()] 761 return [] 762 finally: 763 self.offset = offset 764 765 def find_comments(self, lineno: int) -> t.List[str]: 766 if not self.comment_tags or self.last_lineno > lineno: 767 return [] 768 for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]): 769 if token_lineno > lineno: 770 return self.find_backwards(self.offset + idx) 771 return self.find_backwards(len(self.tokens)) 772 773 774def babel_extract( 775 fileobj: t.BinaryIO, 776 keywords: t.Sequence[str], 777 comment_tags: t.Sequence[str], 778 options: t.Dict[str, t.Any], 779) -> t.Iterator[ 780 t.Tuple[ 781 int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]], t.List[str] 782 ] 783]: 784 """Babel extraction method for Jinja templates. 785 786 .. 
versionchanged:: 2.3 787 Basic support for translation comments was added. If `comment_tags` 788 is now set to a list of keywords for extraction, the extractor will 789 try to find the best preceding comment that begins with one of the 790 keywords. For best results, make sure to not have more than one 791 gettext call in one line of code and the matching comment in the 792 same line or the line before. 793 794 .. versionchanged:: 2.5.1 795 The `newstyle_gettext` flag can be set to `True` to enable newstyle 796 gettext calls. 797 798 .. versionchanged:: 2.7 799 A `silent` option can now be provided. If set to `False` template 800 syntax errors are propagated instead of being ignored. 801 802 :param fileobj: the file-like object the messages should be extracted from 803 :param keywords: a list of keywords (i.e. function names) that should be 804 recognized as translation functions 805 :param comment_tags: a list of translator tags to search for and include 806 in the results. 807 :param options: a dictionary of additional options (optional) 808 :return: an iterator over ``(lineno, funcname, message, comments)`` tuples. 809 (comments will be empty currently) 810 """ 811 extensions: t.Dict[t.Type[Extension], None] = {} 812 813 for extension_name in options.get("extensions", "").split(","): 814 extension_name = extension_name.strip() 815 816 if not extension_name: 817 continue 818 819 extensions[import_string(extension_name)] = None 820 821 if InternationalizationExtension not in extensions: 822 extensions[InternationalizationExtension] = None 823 824 def getbool(options: t.Mapping[str, str], key: str, default: bool = False) -> bool: 825 return options.get(key, str(default)).lower() in {"1", "on", "yes", "true"} 826 827 silent = getbool(options, "silent", True) 828 environment = Environment( 829 options.get("block_start_string", defaults.BLOCK_START_STRING), 830 options.get("block_end_string", defaults.BLOCK_END_STRING), 831 options.get("variable_start_string", defaults.VARIABLE_START_STRING), 832 options.get("variable_end_string", defaults.VARIABLE_END_STRING), 833 options.get("comment_start_string", defaults.COMMENT_START_STRING), 834 options.get("comment_end_string", defaults.COMMENT_END_STRING), 835 options.get("line_statement_prefix") or defaults.LINE_STATEMENT_PREFIX, 836 options.get("line_comment_prefix") or defaults.LINE_COMMENT_PREFIX, 837 getbool(options, "trim_blocks", defaults.TRIM_BLOCKS), 838 getbool(options, "lstrip_blocks", defaults.LSTRIP_BLOCKS), 839 defaults.NEWLINE_SEQUENCE, 840 getbool(options, "keep_trailing_newline", defaults.KEEP_TRAILING_NEWLINE), 841 tuple(extensions), 842 cache_size=0, 843 auto_reload=False, 844 ) 845 846 if getbool(options, "trimmed"): 847 environment.policies["ext.i18n.trimmed"] = True 848 if getbool(options, "newstyle_gettext"): 849 environment.newstyle_gettext = True # type: ignore 850 851 source = fileobj.read().decode(options.get("encoding", "utf-8")) 852 try: 853 node = environment.parse(source) 854 tokens = list(environment.lex(environment.preprocess(source))) 855 except TemplateSyntaxError: 856 if not silent: 857 raise 858 # skip templates with syntax errors 859 return 860 861 finder = _CommentFinder(tokens, comment_tags) 862 for lineno, func, message in extract_from_ast(node, keywords): 863 yield lineno, func, message, finder.find_comments(lineno) 864 865 866#: nicer import names 867i18n = InternationalizationExtension 868do = ExprStmtExtension 869loopcontrols = LoopControlExtension 870debug = DebugExtension
56class Extension: 57 """Extensions can be used to add extra functionality to the Jinja template 58 system at the parser level. Custom extensions are bound to an environment 59 but may not store environment specific data on `self`. The reason for 60 this is that an extension can be bound to another environment (for 61 overlays) by creating a copy and reassigning the `environment` attribute. 62 63 As extensions are created by the environment they cannot accept any 64 arguments for configuration. One may want to work around that by using 65 a factory function, but that is not possible as extensions are identified 66 by their import name. The correct way to configure the extension is 67 storing the configuration values on the environment. Because this way the 68 environment ends up acting as central configuration storage the 69 attributes may clash which is why extensions have to ensure that the names 70 they choose for configuration are not too generic. ``prefix`` for example 71 is a terrible name, ``fragment_cache_prefix`` on the other hand is a good 72 name as includes the name of the extension (fragment cache). 73 """ 74 75 identifier: t.ClassVar[str] 76 77 def __init_subclass__(cls) -> None: 78 cls.identifier = f"{cls.__module__}.{cls.__name__}" 79 80 #: if this extension parses this is the list of tags it's listening to. 81 tags: t.Set[str] = set() 82 83 #: the priority of that extension. This is especially useful for 84 #: extensions that preprocess values. A lower value means higher 85 #: priority. 86 #: 87 #: .. versionadded:: 2.4 88 priority = 100 89 90 def __init__(self, environment: Environment) -> None: 91 self.environment = environment 92 93 def bind(self, environment: Environment) -> "te.Self": 94 """Create a copy of this extension bound to another environment.""" 95 rv = object.__new__(self.__class__) 96 rv.__dict__.update(self.__dict__) 97 rv.environment = environment 98 return rv 99 100 def preprocess( 101 self, source: str, name: t.Optional[str], filename: t.Optional[str] = None 102 ) -> str: 103 """This method is called before the actual lexing and can be used to 104 preprocess the source. The `filename` is optional. The return value 105 must be the preprocessed source. 106 """ 107 return source 108 109 def filter_stream( 110 self, stream: "TokenStream" 111 ) -> t.Union["TokenStream", t.Iterable["Token"]]: 112 """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used 113 to filter tokens returned. This method has to return an iterable of 114 :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a 115 :class:`~jinja2.lexer.TokenStream`. 116 """ 117 return stream 118 119 def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]: 120 """If any of the :attr:`tags` matched this method is called with the 121 parser as first argument. The token the parser stream is pointing at 122 is the name token that matched. This method has to return one or a 123 list of multiple nodes. 124 """ 125 raise NotImplementedError() 126 127 def attr( 128 self, name: str, lineno: t.Optional[int] = None 129 ) -> nodes.ExtensionAttribute: 130 """Return an attribute node for the current extension. This is useful 131 to pass constants on extensions to generated template code. 
132 133 :: 134 135 self.attr('_my_attribute', lineno=lineno) 136 """ 137 return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno) 138 139 def call_method( 140 self, 141 name: str, 142 args: t.Optional[t.List[nodes.Expr]] = None, 143 kwargs: t.Optional[t.List[nodes.Keyword]] = None, 144 dyn_args: t.Optional[nodes.Expr] = None, 145 dyn_kwargs: t.Optional[nodes.Expr] = None, 146 lineno: t.Optional[int] = None, 147 ) -> nodes.Call: 148 """Call a method of the extension. This is a shortcut for 149 :meth:`attr` + :class:`jinja2.nodes.Call`. 150 """ 151 if args is None: 152 args = [] 153 if kwargs is None: 154 kwargs = [] 155 return nodes.Call( 156 self.attr(name, lineno=lineno), 157 args, 158 kwargs, 159 dyn_args, 160 dyn_kwargs, 161 lineno=lineno, 162 )
Extensions can be used to add extra functionality to the Jinja template system at the parser level. Custom extensions are bound to an environment but may not store environment-specific data on self. The reason for this is that an extension can be bound to another environment (for overlays) by creating a copy and reassigning the environment attribute.

As extensions are created by the environment, they cannot accept any arguments for configuration. One may want to work around that by using a factory function, but that is not possible because extensions are identified by their import name. The correct way to configure the extension is to store the configuration values on the environment. Because the environment then ends up acting as central configuration storage, the attributes may clash, which is why extensions have to ensure that the names they choose for configuration are not too generic. prefix, for example, is a terrible name; fragment_cache_prefix, on the other hand, is a good name as it includes the name of the extension (fragment cache).
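To illustrate that advice, here is a minimal sketch of a custom extension that keeps its configuration on the environment under suitably specific names (the FragmentCacheExtension class and its attributes are hypothetical, not part of jinja2.ext):

from jinja2 import Environment
from jinja2.ext import Extension


class FragmentCacheExtension(Extension):
    def __init__(self, environment: Environment) -> None:
        super().__init__(environment)
        # Copy configuration defaults onto the environment; the specific
        # names avoid clashing with other extensions.
        environment.extend(fragment_cache_prefix="fragment/", fragment_cache=None)


env = Environment(extensions=[FragmentCacheExtension])
env.fragment_cache_prefix = "my-site/"  # reconfigure after creation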
def bind(self, environment: Environment) -> "te.Self":
    """Create a copy of this extension bound to another environment."""
    rv = object.__new__(self.__class__)
    rv.__dict__.update(self.__dict__)
    rv.environment = environment
    return rv
Create a copy of this extension bound to another environment.
def preprocess(
    self, source: str, name: t.Optional[str], filename: t.Optional[str] = None
) -> str:
    """This method is called before the actual lexing and can be used to
    preprocess the source. The `filename` is optional. The return value
    must be the preprocessed source.
    """
    return source
This method is called before the actual lexing and can be used to preprocess the source. The filename is optional. The return value must be the preprocessed source.
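As an illustration, a hypothetical extension could override preprocess to rewrite the template text before it is lexed; a minimal sketch (the "#!" marker convention is made up for the example):

from jinja2.ext import Extension


class StripShebangExtension(Extension):
    # No tags: this extension only rewrites the source text.
    tags = set()

    def preprocess(self, source, name, filename=None):
        # Drop a leading "#!" line if the template starts with one.
        if source.startswith("#!"):
            source = source.split("\n", 1)[-1]
        return source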
def filter_stream(
    self, stream: "TokenStream"
) -> t.Union["TokenStream", t.Iterable["Token"]]:
    """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
    to filter tokens returned. This method has to return an iterable of
    :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
    :class:`~jinja2.lexer.TokenStream`.
    """
    return stream
It's passed a jinja2.lexer.TokenStream that can be used to filter tokens returned. This method has to return an iterable of jinja2.lexer.Token objects, but it doesn't have to return a jinja2.lexer.TokenStream.
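For example, a hypothetical extension could post-process the token stream by replacing data tokens and passing everything else through unchanged; a minimal sketch:

from jinja2.ext import Extension
from jinja2.lexer import Token, TokenStream


class ShoutExtension(Extension):
    def filter_stream(self, stream: TokenStream):
        for token in stream:
            if token.type == "data":
                # Replace static template text with an upper-cased token.
                yield Token(token.lineno, token.type, token.value.upper())
            else:
                yield token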
def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
    """If any of the :attr:`tags` matched this method is called with the
    parser as first argument. The token the parser stream is pointing at
    is the name token that matched. This method has to return one or a
    list of multiple nodes.
    """
    raise NotImplementedError()
If any of the tags matched, this method is called with the parser as first argument. The token the parser stream is pointing at is the name token that matched. This method has to return one node or a list of nodes.
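A small sketch of a parse implementation for a hypothetical {% upper %} ... {% endupper %} block tag (the tag name is made up; parse_statements and FilterBlock are ordinary parser and node APIs):

from jinja2 import nodes
from jinja2.ext import Extension


class UpperBlockExtension(Extension):
    tags = {"upper"}

    def parse(self, parser):
        # The stream points at the "upper" name token; consume it.
        lineno = next(parser.stream).lineno
        # Parse the block body until {% endupper %} and drop the end tag.
        body = parser.parse_statements(("name:endupper",), drop_needle=True)
        # Wrap the body so the built-in "upper" filter is applied to it.
        return nodes.FilterBlock(
            body, nodes.Filter(None, "upper", [], [], None, None), lineno=lineno
        )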
def attr(
    self, name: str, lineno: t.Optional[int] = None
) -> nodes.ExtensionAttribute:
    """Return an attribute node for the current extension. This is useful
    to pass constants on extensions to generated template code.

    ::

        self.attr('_my_attribute', lineno=lineno)
    """
    return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
Return an attribute node for the current extension. This is useful to pass constants on extensions to generated template code.

For example:

self.attr('_my_attribute', lineno=lineno)
def call_method(
    self,
    name: str,
    args: t.Optional[t.List[nodes.Expr]] = None,
    kwargs: t.Optional[t.List[nodes.Keyword]] = None,
    dyn_args: t.Optional[nodes.Expr] = None,
    dyn_kwargs: t.Optional[nodes.Expr] = None,
    lineno: t.Optional[int] = None,
) -> nodes.Call:
    """Call a method of the extension. This is a shortcut for
    :meth:`attr` + :class:`jinja2.nodes.Call`.
    """
    if args is None:
        args = []
    if kwargs is None:
        kwargs = []
    return nodes.Call(
        self.attr(name, lineno=lineno),
        args,
        kwargs,
        dyn_args,
        dyn_kwargs,
        lineno=lineno,
    )
Call a method of the extension. This is a shortcut for attr() + jinja2.nodes.Call.
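Putting attr and call_method together, a hypothetical {% stamp %} tag can route rendering back to a method on the bound extension instance (the class and method names below are illustrative only):

import datetime

from jinja2 import nodes
from jinja2.ext import Extension


class StampExtension(Extension):
    tags = {"stamp"}

    def parse(self, parser):
        lineno = next(parser.stream).lineno
        # Compile the tag into a call of self._render() on this extension.
        call = self.call_method("_render", [], lineno=lineno)
        return nodes.Output([call], lineno=lineno)

    def _render(self) -> str:
        return datetime.datetime.now().isoformat()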
247class InternationalizationExtension(Extension): 248 """This extension adds gettext support to Jinja.""" 249 250 tags = {"trans"} 251 252 # TODO: the i18n extension is currently reevaluating values in a few 253 # situations. Take this example: 254 # {% trans count=something() %}{{ count }} foo{% pluralize 255 # %}{{ count }} fooss{% endtrans %} 256 # something is called twice here. One time for the gettext value and 257 # the other time for the n-parameter of the ngettext function. 258 259 def __init__(self, environment: Environment) -> None: 260 super().__init__(environment) 261 environment.globals["_"] = _gettext_alias 262 environment.extend( 263 install_gettext_translations=self._install, 264 install_null_translations=self._install_null, 265 install_gettext_callables=self._install_callables, 266 uninstall_gettext_translations=self._uninstall, 267 extract_translations=self._extract, 268 newstyle_gettext=False, 269 ) 270 271 def _install( 272 self, translations: "_SupportedTranslations", newstyle: t.Optional[bool] = None 273 ) -> None: 274 # ugettext and ungettext are preferred in case the I18N library 275 # is providing compatibility with older Python versions. 276 gettext = getattr(translations, "ugettext", None) 277 if gettext is None: 278 gettext = translations.gettext 279 ngettext = getattr(translations, "ungettext", None) 280 if ngettext is None: 281 ngettext = translations.ngettext 282 283 pgettext = getattr(translations, "pgettext", None) 284 npgettext = getattr(translations, "npgettext", None) 285 self._install_callables( 286 gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext 287 ) 288 289 def _install_null(self, newstyle: t.Optional[bool] = None) -> None: 290 import gettext 291 292 translations = gettext.NullTranslations() 293 294 if hasattr(translations, "pgettext"): 295 # Python < 3.8 296 pgettext = translations.pgettext 297 else: 298 299 def pgettext(c: str, s: str) -> str: # type: ignore[misc] 300 return s 301 302 if hasattr(translations, "npgettext"): 303 npgettext = translations.npgettext 304 else: 305 306 def npgettext(c: str, s: str, p: str, n: int) -> str: # type: ignore[misc] 307 return s if n == 1 else p 308 309 self._install_callables( 310 gettext=translations.gettext, 311 ngettext=translations.ngettext, 312 newstyle=newstyle, 313 pgettext=pgettext, 314 npgettext=npgettext, 315 ) 316 317 def _install_callables( 318 self, 319 gettext: t.Callable[[str], str], 320 ngettext: t.Callable[[str, str, int], str], 321 newstyle: t.Optional[bool] = None, 322 pgettext: t.Optional[t.Callable[[str, str], str]] = None, 323 npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None, 324 ) -> None: 325 if newstyle is not None: 326 self.environment.newstyle_gettext = newstyle # type: ignore 327 if self.environment.newstyle_gettext: # type: ignore 328 gettext = _make_new_gettext(gettext) 329 ngettext = _make_new_ngettext(ngettext) 330 331 if pgettext is not None: 332 pgettext = _make_new_pgettext(pgettext) 333 334 if npgettext is not None: 335 npgettext = _make_new_npgettext(npgettext) 336 337 self.environment.globals.update( 338 gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext 339 ) 340 341 def _uninstall(self, translations: "_SupportedTranslations") -> None: 342 for key in ("gettext", "ngettext", "pgettext", "npgettext"): 343 self.environment.globals.pop(key, None) 344 345 def _extract( 346 self, 347 source: t.Union[str, nodes.Template], 348 gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS, 349 ) -> t.Iterator[ 350 
t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]] 351 ]: 352 if isinstance(source, str): 353 source = self.environment.parse(source) 354 return extract_from_ast(source, gettext_functions) 355 356 def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]: 357 """Parse a translatable tag.""" 358 lineno = next(parser.stream).lineno 359 360 context = None 361 context_token = parser.stream.next_if("string") 362 363 if context_token is not None: 364 context = context_token.value 365 366 # find all the variables referenced. Additionally a variable can be 367 # defined in the body of the trans block too, but this is checked at 368 # a later state. 369 plural_expr: t.Optional[nodes.Expr] = None 370 plural_expr_assignment: t.Optional[nodes.Assign] = None 371 num_called_num = False 372 variables: t.Dict[str, nodes.Expr] = {} 373 trimmed = None 374 while parser.stream.current.type != "block_end": 375 if variables: 376 parser.stream.expect("comma") 377 378 # skip colon for python compatibility 379 if parser.stream.skip_if("colon"): 380 break 381 382 token = parser.stream.expect("name") 383 if token.value in variables: 384 parser.fail( 385 f"translatable variable {token.value!r} defined twice.", 386 token.lineno, 387 exc=TemplateAssertionError, 388 ) 389 390 # expressions 391 if parser.stream.current.type == "assign": 392 next(parser.stream) 393 variables[token.value] = var = parser.parse_expression() 394 elif trimmed is None and token.value in ("trimmed", "notrimmed"): 395 trimmed = token.value == "trimmed" 396 continue 397 else: 398 variables[token.value] = var = nodes.Name(token.value, "load") 399 400 if plural_expr is None: 401 if isinstance(var, nodes.Call): 402 plural_expr = nodes.Name("_trans", "load") 403 variables[token.value] = plural_expr 404 plural_expr_assignment = nodes.Assign( 405 nodes.Name("_trans", "store"), var 406 ) 407 else: 408 plural_expr = var 409 num_called_num = token.value == "num" 410 411 parser.stream.expect("block_end") 412 413 plural = None 414 have_plural = False 415 referenced = set() 416 417 # now parse until endtrans or pluralize 418 singular_names, singular = self._parse_block(parser, True) 419 if singular_names: 420 referenced.update(singular_names) 421 if plural_expr is None: 422 plural_expr = nodes.Name(singular_names[0], "load") 423 num_called_num = singular_names[0] == "num" 424 425 # if we have a pluralize block, we parse that too 426 if parser.stream.current.test("name:pluralize"): 427 have_plural = True 428 next(parser.stream) 429 if parser.stream.current.type != "block_end": 430 token = parser.stream.expect("name") 431 if token.value not in variables: 432 parser.fail( 433 f"unknown variable {token.value!r} for pluralization", 434 token.lineno, 435 exc=TemplateAssertionError, 436 ) 437 plural_expr = variables[token.value] 438 num_called_num = token.value == "num" 439 parser.stream.expect("block_end") 440 plural_names, plural = self._parse_block(parser, False) 441 next(parser.stream) 442 referenced.update(plural_names) 443 else: 444 next(parser.stream) 445 446 # register free names as simple name expressions 447 for name in referenced: 448 if name not in variables: 449 variables[name] = nodes.Name(name, "load") 450 451 if not have_plural: 452 plural_expr = None 453 elif plural_expr is None: 454 parser.fail("pluralize without variables", lineno) 455 456 if trimmed is None: 457 trimmed = self.environment.policies["ext.i18n.trimmed"] 458 if trimmed: 459 singular = self._trim_whitespace(singular) 460 if plural: 461 plural 
= self._trim_whitespace(plural) 462 463 node = self._make_node( 464 singular, 465 plural, 466 context, 467 variables, 468 plural_expr, 469 bool(referenced), 470 num_called_num and have_plural, 471 ) 472 node.set_lineno(lineno) 473 if plural_expr_assignment is not None: 474 return [plural_expr_assignment, node] 475 else: 476 return node 477 478 def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str: 479 return _ws_re.sub(" ", string.strip()) 480 481 def _parse_block( 482 self, parser: "Parser", allow_pluralize: bool 483 ) -> t.Tuple[t.List[str], str]: 484 """Parse until the next block tag with a given name.""" 485 referenced = [] 486 buf = [] 487 488 while True: 489 if parser.stream.current.type == "data": 490 buf.append(parser.stream.current.value.replace("%", "%%")) 491 next(parser.stream) 492 elif parser.stream.current.type == "variable_begin": 493 next(parser.stream) 494 name = parser.stream.expect("name").value 495 referenced.append(name) 496 buf.append(f"%({name})s") 497 parser.stream.expect("variable_end") 498 elif parser.stream.current.type == "block_begin": 499 next(parser.stream) 500 block_name = ( 501 parser.stream.current.value 502 if parser.stream.current.type == "name" 503 else None 504 ) 505 if block_name == "endtrans": 506 break 507 elif block_name == "pluralize": 508 if allow_pluralize: 509 break 510 parser.fail( 511 "a translatable section can have only one pluralize section" 512 ) 513 elif block_name == "trans": 514 parser.fail( 515 "trans blocks can't be nested; did you mean `endtrans`?" 516 ) 517 parser.fail( 518 f"control structures in translatable sections are not allowed; " 519 f"saw `{block_name}`" 520 ) 521 elif parser.stream.eos: 522 parser.fail("unclosed translation block") 523 else: 524 raise RuntimeError("internal parser error") 525 526 return referenced, concat(buf) 527 528 def _make_node( 529 self, 530 singular: str, 531 plural: t.Optional[str], 532 context: t.Optional[str], 533 variables: t.Dict[str, nodes.Expr], 534 plural_expr: t.Optional[nodes.Expr], 535 vars_referenced: bool, 536 num_called_num: bool, 537 ) -> nodes.Output: 538 """Generates a useful node from the data provided.""" 539 newstyle = self.environment.newstyle_gettext # type: ignore 540 node: nodes.Expr 541 542 # no variables referenced? no need to escape for old style 543 # gettext invocations only if there are vars. 544 if not vars_referenced and not newstyle: 545 singular = singular.replace("%%", "%") 546 if plural: 547 plural = plural.replace("%%", "%") 548 549 func_name = "gettext" 550 func_args: t.List[nodes.Expr] = [nodes.Const(singular)] 551 552 if context is not None: 553 func_args.insert(0, nodes.Const(context)) 554 func_name = f"p{func_name}" 555 556 if plural_expr is not None: 557 func_name = f"n{func_name}" 558 func_args.extend((nodes.Const(plural), plural_expr)) 559 560 node = nodes.Call(nodes.Name(func_name, "load"), func_args, [], None, None) 561 562 # in case newstyle gettext is used, the method is powerful 563 # enough to handle the variable expansion and autoescape 564 # handling itself 565 if newstyle: 566 for key, value in variables.items(): 567 # the function adds that later anyways in case num was 568 # called num, so just skip it. 
569 if num_called_num and key == "num": 570 continue 571 node.kwargs.append(nodes.Keyword(key, value)) 572 573 # otherwise do that here 574 else: 575 # mark the return value as safe if we are in an 576 # environment with autoescaping turned on 577 node = nodes.MarkSafeIfAutoescape(node) 578 if variables: 579 node = nodes.Mod( 580 node, 581 nodes.Dict( 582 [ 583 nodes.Pair(nodes.Const(key), value) 584 for key, value in variables.items() 585 ] 586 ), 587 ) 588 return nodes.Output([node])
This extension adds gettext support to Jinja.
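The helpers that the constructor below adds to the environment (install_gettext_translations, install_null_translations, and friends) are how translations are hooked up in practice; a minimal sketch:

from jinja2 import Environment

env = Environment(extensions=["jinja2.ext.i18n"])

# Install real translations from a gettext catalog ...
# import gettext
# env.install_gettext_translations(gettext.translation("messages", "locale", ["de"]))

# ... or use no-op translations while developing.
env.install_null_translations(newstyle=True)

print(env.from_string("{% trans %}Hello World!{% endtrans %}").render())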
def __init__(self, environment: Environment) -> None:
    super().__init__(environment)
    environment.globals["_"] = _gettext_alias
    environment.extend(
        install_gettext_translations=self._install,
        install_null_translations=self._install_null,
        install_gettext_callables=self._install_callables,
        uninstall_gettext_translations=self._uninstall,
        extract_translations=self._extract,
        newstyle_gettext=False,
    )
356 def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]: 357 """Parse a translatable tag.""" 358 lineno = next(parser.stream).lineno 359 360 context = None 361 context_token = parser.stream.next_if("string") 362 363 if context_token is not None: 364 context = context_token.value 365 366 # find all the variables referenced. Additionally a variable can be 367 # defined in the body of the trans block too, but this is checked at 368 # a later state. 369 plural_expr: t.Optional[nodes.Expr] = None 370 plural_expr_assignment: t.Optional[nodes.Assign] = None 371 num_called_num = False 372 variables: t.Dict[str, nodes.Expr] = {} 373 trimmed = None 374 while parser.stream.current.type != "block_end": 375 if variables: 376 parser.stream.expect("comma") 377 378 # skip colon for python compatibility 379 if parser.stream.skip_if("colon"): 380 break 381 382 token = parser.stream.expect("name") 383 if token.value in variables: 384 parser.fail( 385 f"translatable variable {token.value!r} defined twice.", 386 token.lineno, 387 exc=TemplateAssertionError, 388 ) 389 390 # expressions 391 if parser.stream.current.type == "assign": 392 next(parser.stream) 393 variables[token.value] = var = parser.parse_expression() 394 elif trimmed is None and token.value in ("trimmed", "notrimmed"): 395 trimmed = token.value == "trimmed" 396 continue 397 else: 398 variables[token.value] = var = nodes.Name(token.value, "load") 399 400 if plural_expr is None: 401 if isinstance(var, nodes.Call): 402 plural_expr = nodes.Name("_trans", "load") 403 variables[token.value] = plural_expr 404 plural_expr_assignment = nodes.Assign( 405 nodes.Name("_trans", "store"), var 406 ) 407 else: 408 plural_expr = var 409 num_called_num = token.value == "num" 410 411 parser.stream.expect("block_end") 412 413 plural = None 414 have_plural = False 415 referenced = set() 416 417 # now parse until endtrans or pluralize 418 singular_names, singular = self._parse_block(parser, True) 419 if singular_names: 420 referenced.update(singular_names) 421 if plural_expr is None: 422 plural_expr = nodes.Name(singular_names[0], "load") 423 num_called_num = singular_names[0] == "num" 424 425 # if we have a pluralize block, we parse that too 426 if parser.stream.current.test("name:pluralize"): 427 have_plural = True 428 next(parser.stream) 429 if parser.stream.current.type != "block_end": 430 token = parser.stream.expect("name") 431 if token.value not in variables: 432 parser.fail( 433 f"unknown variable {token.value!r} for pluralization", 434 token.lineno, 435 exc=TemplateAssertionError, 436 ) 437 plural_expr = variables[token.value] 438 num_called_num = token.value == "num" 439 parser.stream.expect("block_end") 440 plural_names, plural = self._parse_block(parser, False) 441 next(parser.stream) 442 referenced.update(plural_names) 443 else: 444 next(parser.stream) 445 446 # register free names as simple name expressions 447 for name in referenced: 448 if name not in variables: 449 variables[name] = nodes.Name(name, "load") 450 451 if not have_plural: 452 plural_expr = None 453 elif plural_expr is None: 454 parser.fail("pluralize without variables", lineno) 455 456 if trimmed is None: 457 trimmed = self.environment.policies["ext.i18n.trimmed"] 458 if trimmed: 459 singular = self._trim_whitespace(singular) 460 if plural: 461 plural = self._trim_whitespace(plural) 462 463 node = self._make_node( 464 singular, 465 plural, 466 context, 467 variables, 468 plural_expr, 469 bool(referenced), 470 num_called_num and have_plural, 471 ) 472 
node.set_lineno(lineno) 473 if plural_expr_assignment is not None: 474 return [plural_expr_assignment, node] 475 else: 476 return node
Parse a translatable tag.
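In a template the syntax this method handles looks like the string below; with the i18n extension enabled, the singular and pluralize blocks are compiled into a single ngettext call. A minimal, self-contained sketch:

from jinja2 import Environment

env = Environment(extensions=["jinja2.ext.i18n"])
env.install_null_translations()

template = env.from_string(
    "{% trans count=items|length %}"
    "There is {{ count }} item."
    "{% pluralize %}"
    "There are {{ count }} items."
    "{% endtrans %}"
)
print(template.render(items=["a", "b", "c"]))  # renders the plural form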
class ExprStmtExtension(Extension):
    """Adds a `do` tag to Jinja that works like the print statement just
    that it doesn't print the return value.
    """

    tags = {"do"}

    def parse(self, parser: "Parser") -> nodes.ExprStmt:
        node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
        node.node = parser.parse_tuple()
        return node
Adds a do tag to Jinja that works like the print statement except that it doesn't print the return value.
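A quick usage sketch; the expression after do is evaluated only for its side effect:

from jinja2 import Environment

env = Environment(extensions=["jinja2.ext.do"])

template = env.from_string(
    "{% set seen = [] %}"
    "{% for user in users %}{% do seen.append(user) %}{% endfor %}"
    "{{ seen|length }}"
)
print(template.render(users=["a", "b"]))  # -> 2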
def parse(self, parser: "Parser") -> nodes.ExprStmt:
    node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
    node.node = parser.parse_tuple()
    return node
class LoopControlExtension(Extension):
    """Adds break and continue to the template engine."""

    tags = {"break", "continue"}

    def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]:
        token = next(parser.stream)
        if token.value == "break":
            return nodes.Break(lineno=token.lineno)
        return nodes.Continue(lineno=token.lineno)
Adds break and continue to the template engine.
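A quick usage sketch with {% break %}; {% continue %} works the same way:

from jinja2 import Environment

env = Environment(extensions=["jinja2.ext.loopcontrols"])

template = env.from_string(
    "{% for word in words %}"
    "{% if word|length > 3 %}{% break %}{% endif %}"
    "{{ word }} "
    "{% endfor %}"
)
print(template.render(words=["a", "bb", "cccc", "dd"]))  # -> "a bb "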
def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]:
    token = next(parser.stream)
    if token.value == "break":
        return nodes.Break(lineno=token.lineno)
    return nodes.Continue(lineno=token.lineno)
class DebugExtension(Extension):
    """A ``{% debug %}`` tag that dumps the available variables,
    filters, and tests.

    .. code-block:: html+jinja

        <pre>{% debug %}</pre>

    .. code-block:: text

        {'context': {'cycler': <class 'jinja2.utils.Cycler'>,
                     ...,
                     'namespace': <class 'jinja2.utils.Namespace'>},
         'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
                     ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
         'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
                   ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}

    .. versionadded:: 2.11.0
    """

    tags = {"debug"}

    def parse(self, parser: "Parser") -> nodes.Output:
        lineno = parser.stream.expect("name:debug").lineno
        context = nodes.ContextReference()
        result = self.call_method("_render", [context], lineno=lineno)
        return nodes.Output([result], lineno=lineno)

    def _render(self, context: Context) -> str:
        result = {
            "context": context.get_all(),
            "filters": sorted(self.environment.filters.keys()),
            "tests": sorted(self.environment.tests.keys()),
        }

        # Set the depth since the intent is to show the top few names.
        return pprint.pformat(result, depth=3, compact=True)
A {% debug %} tag that dumps the available variables, filters, and tests.
<pre>{% debug %}</pre>
{'context': {'cycler': <class 'jinja2.utils.Cycler'>,
...,
'namespace': <class 'jinja2.utils.Namespace'>},
'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
New in version 2.11.0.
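Enabling the extension is all that is needed to make the tag available; a minimal sketch:

from jinja2 import Environment

env = Environment(extensions=["jinja2.ext.debug"])
template = env.from_string("<pre>{% debug %}</pre>")

# Prints the context, filters and tests available at render time.
print(template.render(answer=42))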
def parse(self, parser: "Parser") -> nodes.Output:
    lineno = parser.stream.expect("name:debug").lineno
    context = nodes.ContextReference()
    result = self.call_method("_render", [context], lineno=lineno)
    return nodes.Output([result], lineno=lineno)
656def extract_from_ast( 657 ast: nodes.Template, 658 gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS, 659 babel_style: bool = True, 660) -> t.Iterator[ 661 t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]] 662]: 663 """Extract localizable strings from the given template node. Per 664 default this function returns matches in babel style that means non string 665 parameters as well as keyword arguments are returned as `None`. This 666 allows Babel to figure out what you really meant if you are using 667 gettext functions that allow keyword arguments for placeholder expansion. 668 If you don't want that behavior set the `babel_style` parameter to `False` 669 which causes only strings to be returned and parameters are always stored 670 in tuples. As a consequence invalid gettext calls (calls without a single 671 string parameter or string parameters after non-string parameters) are 672 skipped. 673 674 This example explains the behavior: 675 676 >>> from jinja2 import Environment 677 >>> env = Environment() 678 >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}') 679 >>> list(extract_from_ast(node)) 680 [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))] 681 >>> list(extract_from_ast(node, babel_style=False)) 682 [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))] 683 684 For every string found this function yields a ``(lineno, function, 685 message)`` tuple, where: 686 687 * ``lineno`` is the number of the line on which the string was found, 688 * ``function`` is the name of the ``gettext`` function used (if the 689 string was extracted from embedded Python code), and 690 * ``message`` is the string, or a tuple of strings for functions 691 with multiple string arguments. 692 693 This extraction function operates on the AST and is because of that unable 694 to extract any comments. For comment support you have to use the babel 695 extraction interface or extract comments yourself. 696 """ 697 out: t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]] 698 699 for node in ast.find_all(nodes.Call): 700 if ( 701 not isinstance(node.node, nodes.Name) 702 or node.node.name not in gettext_functions 703 ): 704 continue 705 706 strings: t.List[t.Optional[str]] = [] 707 708 for arg in node.args: 709 if isinstance(arg, nodes.Const) and isinstance(arg.value, str): 710 strings.append(arg.value) 711 else: 712 strings.append(None) 713 714 for _ in node.kwargs: 715 strings.append(None) 716 if node.dyn_args is not None: 717 strings.append(None) 718 if node.dyn_kwargs is not None: 719 strings.append(None) 720 721 if not babel_style: 722 out = tuple(x for x in strings if x is not None) 723 724 if not out: 725 continue 726 else: 727 if len(strings) == 1: 728 out = strings[0] 729 else: 730 out = tuple(strings) 731 732 yield node.lineno, node.node.name, out
Extract localizable strings from the given template node. By default this function returns matches in Babel style, which means non-string parameters as well as keyword arguments are returned as None. This allows Babel to figure out what you really meant if you are using gettext functions that allow keyword arguments for placeholder expansion. If you don't want that behavior, set the babel_style parameter to False, which causes only strings to be returned and parameters to always be stored in tuples. As a consequence, invalid gettext calls (calls without a single string parameter, or with string parameters after non-string parameters) are skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a (lineno, function, message) tuple, where:
- lineno is the number of the line on which the string was found,
- function is the name of the gettext function used (if the string was extracted from embedded Python code), and
- message is the string, or a tuple of strings for functions with multiple string arguments.
This extraction function operates on the AST and therefore cannot extract any comments. For comment support you have to use the Babel extraction interface or extract comments yourself.
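Beyond the doctest above, a common use is to walk parsed template files and collect the yielded tuples. A minimal sketch follows; the templates/ directory and the .html glob are assumptions for the example:

from pathlib import Path

from jinja2 import Environment
from jinja2.ext import extract_from_ast

env = Environment(extensions=["jinja2.ext.i18n"])

# Hypothetical template directory; adjust the glob to your layout.
for path in Path("templates").glob("**/*.html"):
    ast = env.parse(path.read_text(encoding="utf-8"))
    for lineno, funcname, message in extract_from_ast(ast):
        print(f"{path}:{lineno}: {funcname} {message!r}")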
def babel_extract(
    fileobj: t.BinaryIO,
    keywords: t.Sequence[str],
    comment_tags: t.Sequence[str],
    options: t.Dict[str, t.Any],
) -> t.Iterator[
    t.Tuple[
        int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]], t.List[str]
    ]
]:
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
       Basic support for translation comments was added. If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
       keywords. For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.

    .. versionchanged:: 2.5.1
       The `newstyle_gettext` flag can be set to `True` to enable newstyle
       gettext calls.

    .. versionchanged:: 2.7
       A `silent` option can now be provided. If set to `False` template
       syntax errors are propagated instead of being ignored.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
             (comments will be empty currently)
    """
    extensions: t.Dict[t.Type[Extension], None] = {}

    for extension_name in options.get("extensions", "").split(","):
        extension_name = extension_name.strip()

        if not extension_name:
            continue

        extensions[import_string(extension_name)] = None

    if InternationalizationExtension not in extensions:
        extensions[InternationalizationExtension] = None

    def getbool(options: t.Mapping[str, str], key: str, default: bool = False) -> bool:
        return options.get(key, str(default)).lower() in {"1", "on", "yes", "true"}

    silent = getbool(options, "silent", True)
    environment = Environment(
        options.get("block_start_string", defaults.BLOCK_START_STRING),
        options.get("block_end_string", defaults.BLOCK_END_STRING),
        options.get("variable_start_string", defaults.VARIABLE_START_STRING),
        options.get("variable_end_string", defaults.VARIABLE_END_STRING),
        options.get("comment_start_string", defaults.COMMENT_START_STRING),
        options.get("comment_end_string", defaults.COMMENT_END_STRING),
        options.get("line_statement_prefix") or defaults.LINE_STATEMENT_PREFIX,
        options.get("line_comment_prefix") or defaults.LINE_COMMENT_PREFIX,
        getbool(options, "trim_blocks", defaults.TRIM_BLOCKS),
        getbool(options, "lstrip_blocks", defaults.LSTRIP_BLOCKS),
        defaults.NEWLINE_SEQUENCE,
        getbool(options, "keep_trailing_newline", defaults.KEEP_TRAILING_NEWLINE),
        tuple(extensions),
        cache_size=0,
        auto_reload=False,
    )

    if getbool(options, "trimmed"):
        environment.policies["ext.i18n.trimmed"] = True
    if getbool(options, "newstyle_gettext"):
        environment.newstyle_gettext = True  # type: ignore

    source = fileobj.read().decode(options.get("encoding", "utf-8"))
    try:
        node = environment.parse(source)
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError:
        if not silent:
            raise
        # skip templates with syntax errors
        return

    finder = _CommentFinder(tokens, comment_tags)

    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
Babel extraction method for Jinja templates.
Changed in version 2.3:
Basic support for translation comments was added. If comment_tags is set to a list of keywords for extraction, the extractor will try to find the best preceding comment that begins with one of the keywords. For best results, make sure not to have more than one gettext call in one line of code, and keep the matching comment on the same line or the line before.
Changed in version 2.5.1:
The newstyle_gettext flag can be set to True to enable newstyle gettext calls.
Changed in version 2.7:
A silent option can now be provided. If set to False, template syntax errors are propagated instead of being ignored.
Parameters
- fileobj: the file-like object the messages should be extracted from
- keywords: a list of keywords (i.e. function names) that should be recognized as translation functions
- comment_tags: a list of translator tags to search for and include in the results.
- options: a dictionary of additional options (optional)
Returns
an iterator over (lineno, funcname, message, comments) tuples (comments are currently always empty).
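For illustration, babel_extract can also be called directly with a binary file object, mirroring how Babel's extraction machinery invokes it. The file path and option values below are assumptions for the sketch:

from jinja2.ext import babel_extract

# Hypothetical template file, opened in binary mode as Babel would do.
with open("templates/index.html", "rb") as fileobj:
    results = babel_extract(
        fileobj,
        keywords=("gettext", "ngettext", "_"),
        comment_tags=("Translators",),
        options={"trimmed": "true", "extensions": "jinja2.ext.do"},
    )
    for lineno, funcname, message, comments in results:
        print(lineno, funcname, message, comments)

In practice this function is usually wired up through Babel's extraction mapping rather than called by hand.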