Validate Python TypedDict at runtime - django-rest-framework

I'm working in a Python 3.8+ Django/REST Framework environment that enforces types in new code but is built on a lot of untyped legacy code and data. We are using TypedDicts extensively to ensure that the data we generate and pass to our TypeScript front-end has the proper types.
MyPy/PyCharm/etc. do a great job of checking that our new code spits out data that conforms, but we want to test that the output of our many RestSerializers/ModelSerializers fits the TypedDict. If I have a serializer and TypedDict like:
class PersonSerializer(ModelSerializer):
    class Meta:
        model = Person
        fields = ['first', 'last']


class PersonData(TypedDict):
    first: str
    last: str
    email: str
and then run code like:
person_dict: PersonData = PersonSerializer(Person.objects.first()).data
Static type checkers aren't able to figure out that person_dict is missing the required email key, because (by design of PEP 589) it is just a normal dict at runtime. But I can write something like:
annotations = PersonData.__annotations__
for k in annotations:
    assert k in person_dict  # or something more complex.
    assert isinstance(person_dict[k], annotations[k])
and it will find that email is missing from the data of the serializer. This is well and good in this case, where I don't have any changes introduced by from __future__ import annotations (not sure if this would break it), and all my type annotations are bare types. But if PersonData were defined like:
class PersonData(TypedDict):
    email: Optional[str]
    affiliations: Union[List[str], Dict[int, str]]
then isinstance is not good enough to check if the data passes (since "Subscripted generics cannot be used with class and instance checks").
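For example, a minimal illustration of that limitation in a Python 3.8 shell:
>>> from typing import List, Optional
>>> isinstance("x", Optional[str])
TypeError: Subscripted generics cannot be used with class and instance checks
>>> isinstance([], List[str])
TypeError: Subscripted generics cannot be used with class and instance checks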
What I'm wondering is if there already exists a callable function/method (in mypy or another checker) that would allow me to validate a TypedDict (or even a single variable, since I can iterate a dict myself) against an annotation and see if it validates?
I'm not concerned about speed, etc., since the point of this is to check all our data/methods/functions once and then remove the checks later once we're happy that our current data validates.

The simplest solution I found works using pydantic.
from typing import cast, TypedDict

import pydantic


class SomeDict(TypedDict):
    val: int
    name: str


# this could be a valid/invalid declaration
obj: SomeDict = {
    'val': 12,
    'name': 'John',
}

# validate with pydantic
try:
    obj = cast(SomeDict, pydantic.create_model_from_typeddict(SomeDict)(**obj).dict())
except pydantic.ValidationError as exc:
    print(f"ERROR: Invalid schema: {exc}")
EDIT: When type checking this, it currently returns an error, but works as expected. See here: https://github.com/samuelcolvin/pydantic/issues/3008
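Note (my addition, not from the original answer): if you are on pydantic v2, create_model_from_typeddict is no longer available as far as I know; the equivalent appears to be TypeAdapter. A minimal sketch, assuming pydantic >= 2 (which also wants the TypedDict to come from typing_extensions on Python < 3.12):

from typing import cast

import pydantic
from typing_extensions import TypedDict  # required by pydantic v2 on Python < 3.12


class SomeDict(TypedDict):
    val: int
    name: str


obj = {'val': 12, 'name': 'John'}

try:
    # validate the plain dict against the TypedDict schema
    obj = cast(SomeDict, pydantic.TypeAdapter(SomeDict).validate_python(obj))
except pydantic.ValidationError as exc:
    print(f"ERROR: Invalid schema: {exc}")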

You may want to have a look at https://pypi.org/project/strongtyping/. This may help.
In the docs you can find this example:
from typing import List, TypedDict

from strongtyping.strong_typing import match_class_typing


@match_class_typing
class SalesSummary(TypedDict):
    sales: int
    country: str
    product_codes: List[str]


# works like expected
SalesSummary({"sales": 10, "country": "Foo", "product_codes": ["1", "2", "3"]})

# will raise a TypeMisMatch
SalesSummary({"sales": "Foo", "country": 10, "product_codes": [1, 2, 3]})

A little bit of a hack, but you can check two types using mypy's command-line -c option. Just wrap it in a Python function:
import subprocess


def is_assignable(type_to, type_from) -> bool:
    """
    Returns true if `type_from` can be assigned to `type_to`,
    e.g. type_to := type_from

    Example:
        >>> is_assignable(bool, str)
        False
        >>> from typing import *
        >>> is_assignable(Union[List[str], Dict[int, str]], List[str])
        True
    """
    code = "\n".join((
        "import typing",
        f"type_to: {type_to}",
        f"type_from: {type_from}",
        "type_to = type_from",
    ))
    return subprocess.call(("mypy", "-c", code)) == 0

You could do something like this:
def validate(typ: Any, instance: Any) -> bool:
    for property_name, property_type in typ.__annotations__.items():
        value = instance.get(property_name, None)
        if value is None:
            # Check for missing keys
            print(f"Missing key: {property_name}")
            return False
        elif property_type not in (int, float, bool, str):
            # check if property_type is object (e.g. not a primitive)
            result = validate(property_type, value)
            if result is False:
                return False
        elif not isinstance(value, property_type):
            # Check for type equality
            print(f"Wrong type: {property_name}. Expected {property_type}, got {type(value)}")
            return False
    return True
And then test some object, e.g. one that was passed to your REST endpoint:
class MySubModel(TypedDict):
    subfield: bool


class MyModel(TypedDict):
    first: str
    last: str
    email: str
    sub: MySubModel


m = {
    'email': 'JohnDoeAtDoeishDotCom',
    'first': 'John'
}
assert validate(MyModel, m) is False
This one prints the first error and returns a bool; you could change that to raising exceptions, possibly collecting all of the missing keys. You could also extend it to fail on keys that are not defined by the model (a sketch of that extra-key check follows below).
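For instance, a minimal sketch of that extra-key check (my addition; it reuses the validate() function above):

def validate_strict(typ: Any, instance: dict) -> bool:
    # Reject keys that the TypedDict does not declare
    extra_keys = set(instance) - set(typ.__annotations__)
    if extra_keys:
        print(f"Unexpected keys: {extra_keys}")
        return False
    return validate(typ, instance)


assert validate_strict(MyModel, {**m, 'nickname': 'JD'}) is False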

I like your solution! To save users from having to re-run and fix errors one at a time, I added some code to your solution so it collects and reports all of the errors at once :D
def validate_custom_typed_dict(instance: Any, custom_typed_dict: TypedDict) -> bool:
    key_errors = []
    type_errors = []
    for property_name, type_ in custom_typed_dict.__annotations__.items():
        value = instance.get(property_name, None)
        if value is None:
            # Check for missing keys
            key_errors.append(f"\t- Missing property: '{property_name}' \n")
        elif type_ not in (int, float, bool, str):
            # check if type is object (e.g. not a primitive)
            result = validate_custom_typed_dict(value, type_)
            if result is False:
                type_errors.append(f"\t- '{property_name}' expected {type_}, got {type(value)}\n")
        elif not isinstance(value, type_):
            # Check for type equality
            type_errors.append(f"\t- '{property_name}' expected {type_}, got {type(value)}\n")
    if len(key_errors) > 0 or len(type_errors) > 0:
        error_message = f'\n{"".join(key_errors)}{"".join(type_errors)}'
        raise Exception(error_message)
    return True
some console output:
Exception:
- Missing property: 'Combined_cycle'
- Missing property: 'Solar_PV'
- Missing property: 'Hydro'
- 'timestamp' expected <class 'str'>, got <class 'int'>
- 'Diesel_engines' expected <class 'float'>, got <class 'int'>

Related

Vertex AI Pipelines (Kubeflow) skip step with dependent outputs on later step

I’m trying to run a Vertex AI Pipelines job where I skip a certain pipeline step if the value of a certain pipeline parameter (in this case do_task1) is False. But because there is another step that runs unconditionally and expects the output of the first potentially skipped step, I get the following error, independently of do_task1 being True or False:
AssertionError: component_input_artifact: pipelineparam--task1-output_path not found. All inputs: parameters {
key: "do_task1"
value {
type: STRING
}
}
parameters {
key: "task1_name"
value {
type: STRING
}
}
It seems like the compiler just cannot find the output output_path from task1. So I wonder if there is any way to have some sort of placeholder for the outputs of those steps that are under a dsl.Condition, so that they get filled with default values unless the actual steps run and fill them with the non-default values.
The code below represents the problem and is easily reproducible.
I'm using google-cloud-aiplatform==1.14.0 and kfp==1.8.11
from typing import NamedTuple

from kfp import dsl
from kfp.v2.dsl import Dataset, Input, OutputPath, component
from kfp.v2 import compiler
from google.cloud.aiplatform import pipeline_jobs


@component(
    base_image="python:3.9",
    packages_to_install=["pandas"]
)
def task1(
    # inputs
    task1_name: str,
    # outputs
    output_path: OutputPath("Dataset"),
) -> NamedTuple("Outputs", [("output_1", str), ("output_2", int)]):
    import pandas as pd

    output_1 = task1_name + "-processed"
    output_2 = 2

    df_output_1 = pd.DataFrame({"output_1": [output_1]})
    df_output_1.to_csv(output_path, index=False)

    return (output_1, output_2)


@component(
    base_image="python:3.9",
    packages_to_install=["pandas"]
)
def task2(
    # inputs
    task1_output: Input[Dataset],
) -> str:
    import pandas as pd

    task1_input = pd.read_csv(task1_output.path).values[0][0]

    return task1_input


@dsl.pipeline(
    pipeline_root='pipeline_root',
    name='pipelinename',
)
def pipeline(
    do_task1: bool,
    task1_name: str,
):
    with dsl.Condition(do_task1 == True):
        task1_op = (
            task1(
                task1_name=task1_name,
            )
        )

    task2_op = (
        task2(
            task1_output=task1_op.outputs["output_path"],
        )
    )


if __name__ == '__main__':

    do_task1 = True  # <------------ The variable to modify ---------------

    # compile pipeline
    compiler.Compiler().compile(
        pipeline_func=pipeline, package_path='pipeline.json')

    # create pipeline run
    pipeline_run = pipeline_jobs.PipelineJob(
        display_name='pipeline-display-name',
        pipeline_root='pipelineroot',
        job_id='pipeline-job-id',
        template_path='pipelinename.json',
        parameter_values={
            'do_task1': do_task1,  # pipeline compilation fails with either True or False values
            'task1_name': 'Task 1',
        },
        enable_caching=False
    )

    # execute pipeline run
    pipeline_run.run()
Any help is much appreciated!
The real issue here is with dsl.Condition(): it creates a sub-group, where task1_op is an inner task only "visible" from within the sub-group. In the latest SDK, it will throw a more explicit error message saying task2 cannot depend on any inner task.
So to resolve the issue, you just need to move task2 to be within the condition -- if the condition is not met, you don't have a valid input to feed into task2 anyway.
with dsl.Condition(do_task1 == True):
    task1_op = (
        task1(
            task1_name=task1_name,
        )
    )

    task2_op = (
        task2(
            task1_output=task1_op.outputs["output_path"],
        )
    )

MyST-Parser: Auto linking / linkifying references to bug tracker issues

I use sphinx w/ MyST-Parser for markdown, and
I want GitHub or GitLab-style auto linking (linkifying) for references.
Is there a way to have MyST render the reference:
#346
In docutils-speak, this is a Text node (example)
And behave as if it was:
[#346](https://github.com/vcs-python/libvcs/pull/346)
So when rendered it'd be like:
#346
Not the custom role:
{issue}`1` <- Not this
Another example: linkifying the reference @user to a GitHub, GitLab, or StackOverflow user.
What I'm currently doing (and why it doesn't work)
Right now I'm using the canonical solution docutils offers: custom roles.
I use sphinx-issues (PyPI), which does just that. It uses a Sphinx configuration variable, issues_github_path, to build the URL:
e.g. in Sphinx configuration conf.py:
issues_github_path = 'vcs-python/libvcs'
reStructuredText:
:issue:`346`
MyST-Parser:
{issue}`346`
Why custom roles don't work
Sadly, those aren't bi-directional with GitHub/GitLab/tools. If you copy/paste MyST-Parser -> GitHub/GitLab or preview it directly, it looks very bad:
Example of CHANGES:
Example issue: https://github.com/vcs-python/libvcs/issues/363
What we want is to just be able to copy markdown including #347 to and from.
Does a solution already exist?
Are there any projects out there of docutils or sphinx plugins to turn @username or #issue references into links?
sphinx (at least) can demonstrably do so for custom roles - as seen in sphinx-issues' usage of issues_github_path - by using project configuration context.
MyST-Parser has a linkify extension which uses linkify-it-py.
This can turn a bare URL such as https://www.google.com into a link without needing to write <https://www.google.com>.
Therefore, there may already be a tool out there.
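For reference (my addition), enabling that extension in conf.py is just:

# conf.py
myst_enable_extensions = [
    "linkify",
]

but linkify only recognizes URL-shaped text, so by itself it won't touch #346-style references.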
Can it be done through the API?
The toolchain for myst, sphinx and docutils is robust. This is a special case.
This needs to be done at the Text node level. Custom role won't work - as stated above - since it'll create markdown that can't be copied between GitLab and GitHub issues trivially.
The stack:
MyST-Parser API (Markdown-it-py API) > Sphinx APIs (MySTParser + Sphinx) > Docutils API
At the time of writing, I'm using Sphinx 4.3.2, MyST-Parser 0.17.2, and docutils 0.17.1 on python 3.10.2.
Notes
For the sake of an example, I'm using an open source project of mine that is facing this issue.
This is only about autolinking issues or usernames - things that'd easily be mappable to URLs. autodoc code-linking is out of scope.
There is a (defunct) project that does this: sphinxcontrib-issuetracker.
I've rebooted it:
conf.py:
import sys
from pathlib import Path

cwd = Path(__file__).parent
project_root = cwd.parent

sys.path.insert(0, str(project_root))
sys.path.insert(0, str(cwd / "_ext"))

extensions = [
    "link_issues",
]

# issuetracker
issuetracker = "github"
issuetracker_project = "cihai/unihan-etl"  # e.g. for https://github.com/cihai/unihan-etl
_ext/link_issues.py:
"""Issue linking w/ plain-text autolinking, e.g. #42
Credit: https://github.com/ignatenkobrain/sphinxcontrib-issuetracker
License: BSD
Changes by Tony Narlock (2022-08-21):
- Type annotations
mypy --strict, requires types-requests, types-docutils
Python < 3.10 require typing-extensions
- TrackerConfig: Use dataclasses instead of typing.NamedTuple and hacking __new__
- app.warn (removed in 5.0) -> Use Sphinx Logging API
https://www.sphinx-doc.org/en/master/extdev/logging.html#logging-api
- Add PendingIssueXRef
Typing for tracker_config and precision
- Add IssueTrackerBuildEnvironment
Subclassed / typed BuildEnvironment with .tracker_config
- Just GitHub (for demonstration)
"""
import dataclasses
import re
import sys
import time
import typing as t
import requests
from docutils import nodes
from sphinx.addnodes import pending_xref
from sphinx.application import Sphinx
from sphinx.config import Config
from sphinx.environment import BuildEnvironment
from sphinx.transforms import SphinxTransform
from sphinx.util import logging
if t.TYPE_CHECKING:
if sys.version_info >= (3, 10):
from typing import TypeGuard
else:
from typing_extensions import TypeGuard
logger = logging.getLogger(__name__)
GITHUB_API_URL = "https://api.github.com/repos/{0.project}/issues/{1}"
class IssueTrackerBuildEnvironment(BuildEnvironment):
tracker_config: "TrackerConfig"
issuetracker_cache: "IssueTrackerCache"
github_rate_limit: t.Tuple[float, bool]
class Issue(t.NamedTuple):
id: str
title: str
url: str
closed: bool
IssueTrackerCache = t.Dict[str, Issue]
#dataclasses.dataclass
class TrackerConfig:
project: str
url: str
"""
Issue tracker configuration.
This class provides configuration for trackers, and is passed as
``tracker_config`` arguments to callbacks of
:event:`issuetracker-lookup-issue`.
"""
def __post_init__(self) -> None:
if self.url is not None:
self.url = self.url.rstrip("/")
#classmethod
def from_sphinx_config(cls, config: Config) -> "TrackerConfig":
"""
Get tracker configuration from ``config``.
"""
project = config.issuetracker_project or config.project
url = config.issuetracker_url
return cls(project=project, url=url)
class PendingIssueXRef(pending_xref):
tracker_config: TrackerConfig
class IssueReferences(SphinxTransform):
default_priority = 999
def apply(self) -> None:
config = self.document.settings.env.config
tracker_config = TrackerConfig.from_sphinx_config(config)
issue_pattern = config.issuetracker_issue_pattern
title_template = None
if isinstance(issue_pattern, str):
issue_pattern = re.compile(issue_pattern)
for node in self.document.traverse(nodes.Text):
parent = node.parent
if isinstance(parent, (nodes.literal, nodes.FixedTextElement)):
# ignore inline and block literal text
continue
if isinstance(parent, nodes.reference):
continue
text = str(node)
new_nodes = []
last_issue_ref_end = 0
for match in issue_pattern.finditer(text):
# catch invalid pattern with too many groups
if len(match.groups()) != 1:
raise ValueError(
"issuetracker_issue_pattern must have "
"exactly one group: {0!r}".format(match.groups())
)
# extract the text between the last issue reference and the
# current issue reference and put it into a new text node
head = text[last_issue_ref_end : match.start()]
if head:
new_nodes.append(nodes.Text(head))
# adjust the position of the last issue reference in the
# text
last_issue_ref_end = match.end()
# extract the issue text (including the leading dash)
issuetext = match.group(0)
# extract the issue number (excluding the leading dash)
issue_id = match.group(1)
# turn the issue reference into a reference node
refnode = PendingIssueXRef()
refnode["refdomain"] = None
refnode["reftarget"] = issue_id
refnode["reftype"] = "issue"
refnode["trackerconfig"] = tracker_config
reftitle = title_template or issuetext
refnode.append(
nodes.inline(issuetext, reftitle, classes=["xref", "issue"])
)
new_nodes.append(refnode)
if not new_nodes:
# no issue references were found, move on to the next node
continue
# extract the remaining text after the last issue reference, and
# put it into a text node
tail = text[last_issue_ref_end:]
if tail:
new_nodes.append(nodes.Text(tail))
# find and remove the original node, and insert all new nodes
# instead
parent.replace(node, new_nodes)
def is_issuetracker_env(
env: t.Any,
) -> "TypeGuard['IssueTrackerBuildEnvironment']":
return hasattr(env, "issuetracker_cache") and env.issuetracker_cache is not None
def lookup_issue(
app: Sphinx, tracker_config: TrackerConfig, issue_id: str
) -> t.Optional[Issue]:
"""
Lookup the given issue.
The issue is first looked up in an internal cache. If it is not found, the
event ``issuetracker-lookup-issue`` is emitted. The result of this
invocation is then cached and returned.
``app`` is the sphinx application object. ``tracker_config`` is the
:class:`TrackerConfig` object representing the issue tracker configuration.
``issue_id`` is a string containing the issue id.
Return a :class:`Issue` object for the issue with the given ``issue_id``,
or ``None`` if the issue wasn't found.
"""
env = app.env
if is_issuetracker_env(env):
cache: IssueTrackerCache = env.issuetracker_cache
if issue_id not in cache:
issue = app.emit_firstresult(
"issuetracker-lookup-issue", tracker_config, issue_id
)
cache[issue_id] = issue
return cache[issue_id]
return None
def lookup_issues(app: Sphinx, doctree: nodes.document) -> None:
"""
Lookup issues found in the given ``doctree``.
Each issue reference in the given ``doctree`` is looked up. Each lookup
result is cached by mapping the referenced issue id to the looked up
:class:`Issue` object (an existing issue) or ``None`` (a missing issue).
The cache is available at ``app.env.issuetracker_cache`` and is pickled
along with the environment.
"""
for node in doctree.traverse(PendingIssueXRef):
if node["reftype"] == "issue":
lookup_issue(app, node["trackerconfig"], node["reftarget"])
def make_issue_reference(issue: Issue, content_node: nodes.inline) -> nodes.reference:
"""
Create a reference node for the given issue.
``content_node`` is a docutils node which is supposed to be added as
content of the created reference. ``issue`` is the :class:`Issue` which
the reference shall point to.
Return a :class:`docutils.nodes.reference` for the issue.
"""
reference = nodes.reference()
reference["refuri"] = issue.url
if issue.title:
reference["reftitle"] = issue.title
if issue.closed:
content_node["classes"].append("closed")
reference.append(content_node)
return reference
def resolve_issue_reference(
app: Sphinx, env: BuildEnvironment, node: PendingIssueXRef, contnode: nodes.inline
) -> t.Optional[nodes.reference]:
"""
Resolve an issue reference and turn it into a real reference to the
corresponding issue.
``app`` and ``env`` are the Sphinx application and environment
respectively. ``node`` is a ``pending_xref`` node representing the missing
reference. It is expected to have the following attributes:
- ``reftype``: The reference type
- ``trackerconfig``: The :class:`TrackerConfig`` to use for this node
- ``reftarget``: The issue id
- ``classes``: The node classes
References with a ``reftype`` other than ``'issue'`` are skipped by
returning ``None``. Otherwise the new node is returned.
If the referenced issue was found, a real reference to this issue is
returned. The text of this reference is formatted with the :class:`Issue`
object available in the ``issue`` key. The reference title is set to the
issue title. If the issue is closed, the class ``closed`` is added to the
new content node.
Otherwise, if the issue was not found, the content node is returned.
"""
if node["reftype"] != "issue":
return None
issue = lookup_issue(app, node["trackerconfig"], node["reftarget"])
if issue is None:
return contnode
else:
classes = contnode["classes"]
conttext = str(contnode[0])
formatted_conttext = nodes.Text(conttext.format(issue=issue))
formatted_contnode = nodes.inline(conttext, formatted_conttext, classes=classes)
assert issue is not None
return make_issue_reference(issue, formatted_contnode)
return None
def init_cache(app: Sphinx) -> None:
if not hasattr(app.env, "issuetracker_cache"):
app.env.issuetracker_cache: "IssueTrackerCache" = {} # type: ignore
return None
def check_project_with_username(tracker_config: TrackerConfig) -> None:
if "/" not in tracker_config.project:
raise ValueError(
"username missing in project name: {0.project}".format(tracker_config)
)
HEADERS = {"User-Agent": "sphinxcontrib-issuetracker v{0}".format("1.0")}
def get(app: Sphinx, url: str) -> t.Optional[requests.Response]:
"""
Get a response from the given ``url``.
``url`` is a string containing the URL to request via GET. ``app`` is the
Sphinx application object.
Return the :class:`~requests.Response` object on status code 200, or
``None`` otherwise. If the status code is not 200 or 404, a warning is
emitted via ``app``.
"""
response = requests.get(url, headers=HEADERS)
if response.status_code == requests.codes.ok:
return response
elif response.status_code != requests.codes.not_found:
msg = "GET {0.url} failed with code {0.status_code}"
logger.warning(msg.format(response))
return None
def lookup_github_issue(
app: Sphinx, tracker_config: TrackerConfig, issue_id: str
) -> t.Optional[Issue]:
check_project_with_username(tracker_config)
env = app.env
if is_issuetracker_env(env):
# Get rate limit information from the environment
timestamp, limit_hit = getattr(env, "github_rate_limit", (0, False))
if limit_hit and time.time() - timestamp > 3600:
# Github limits applications hourly
limit_hit = False
if not limit_hit:
url = GITHUB_API_URL.format(tracker_config, issue_id)
response = get(app, url)
if response:
rate_remaining = response.headers.get("X-RateLimit-Remaining")
assert rate_remaining is not None
if rate_remaining.isdigit() and int(rate_remaining) == 0:
logger.warning("Github rate limit hit")
env.github_rate_limit = (time.time(), True)
issue = response.json()
closed = issue["state"] == "closed"
return Issue(
id=issue_id,
title=issue["title"],
closed=closed,
url=issue["html_url"],
)
else:
logger.warning(
"Github rate limit exceeded, not resolving issue {0}".format(issue_id)
)
return None
BUILTIN_ISSUE_TRACKERS: t.Dict[str, t.Any] = {
"github": lookup_github_issue,
}
def init_transformer(app: Sphinx) -> None:
if app.config.issuetracker_plaintext_issues:
app.add_transform(IssueReferences)
def connect_builtin_tracker(app: Sphinx) -> None:
if app.config.issuetracker:
tracker = BUILTIN_ISSUE_TRACKERS[app.config.issuetracker.lower()]
app.connect(str("issuetracker-lookup-issue"), tracker)
def setup(app: Sphinx) -> t.Dict[str, t.Any]:
app.add_config_value("mybase", "https://github.com/cihai/unihan-etl", "env")
app.add_event(str("issuetracker-lookup-issue"))
app.connect(str("builder-inited"), connect_builtin_tracker)
app.add_config_value("issuetracker", None, "env")
app.add_config_value("issuetracker_project", None, "env")
app.add_config_value("issuetracker_url", None, "env")
# configuration specific to plaintext issue references
app.add_config_value("issuetracker_plaintext_issues", True, "env")
app.add_config_value(
"issuetracker_issue_pattern",
re.compile(
r"#(\d+)",
),
"env",
)
app.add_config_value("issuetracker_title_template", None, "env")
app.connect(str("builder-inited"), init_cache)
app.connect(str("builder-inited"), init_transformer)
app.connect(str("doctree-read"), lookup_issues)
app.connect(str("missing-reference"), resolve_issue_reference)
return {
"version": "1.0",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
Mirrors
https://gist.github.com/tony/05a3043d97d37c158763fb2f6a2d5392
https://github.com/ignatenkobrain/sphinxcontrib-issuetracker/issues/25
Mypy users
mypy --strict docs/_ext/link_issues.py works as of mypy 0.971
If you use mypy: pip install types-docutils types-requests
Install:
https://pypi.org/project/types-docutils/
https://pypi.org/project/types-requests/
https://pypi.org/project/typing-extensions/ (Python <3.10)
Example
via unihan-etl#261 / v0.17.2 (source, view, but page may be outdated)

pyyaml parse data with tag

I have YAML data like the input below and I need the output as key-value pairs.
Input
a="""
--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess
code:
- '716'
- '718'
id:
- 488
- 499
"""
output needed
{'code': ['716', '718'], 'id': [488, 499]}
The default constructor was giving me an error. I tried adding a new constructor and now it's not giving me an error, but I am not able to get the key-value pairs.
FYI, if I remove the !ruby/hash:ActiveSupport::HashWithIndifferentAccess line from my YAML then it gives me the desired output.
def new_constructor(loader, tag_suffix, node):
    if type(node.value) == 'list':
        val = ''.join(node.value)
    else:
        val = node.value
    val = node.value
    ret_val = """
{0}
""".format(val)
    return ret_val


yaml.add_multi_constructor('', new_constructor)

yaml.load(a)
output
"\n [(ScalarNode(tag=u'tag:yaml.org,2002:str', value=u'code'), SequenceNode(tag=u'tag:yaml.org,2002:seq', value=[ScalarNode(tag=u'tag:yaml.org,2002:str', value=u'716'), ScalarNode(tag=u'tag:yaml.org,2002:str', value=u'718')])), (ScalarNode(tag=u'tag:yaml.org,2002:str', value=u'id'), SequenceNode(tag=u'tag:yaml.org,2002:seq', value=[ScalarNode(tag=u'tag:yaml.org,2002:int', value=u'488'), ScalarNode(tag=u'tag:yaml.org,2002:int', value=u'499')]))]\n "
Please suggest.
This is not a solution using PyYAML, but I recommend using ruamel.yaml instead. If for no other reason, it's more actively maintained than PyYAML. A quote from the overview
Many of the bugs filed against PyYAML, but that were never acted upon, have been fixed in ruamel.yaml
To load that string, you can do
import ruamel.yaml
parser = ruamel.yaml.YAML()
obj = parser.load(a) # as defined above.
I strongly recommend following @Andrew F's answer, but in case you
wonder why your code did not get the proper result, that is because
you don't correctly process the node under the tag in your tag
handling.
Although the node's value is a list (of tuples with key value pairs),
you should test for the type of the node itself (using isinstance)
and then hand it over to the "normal" mapping processing routine as
the tag is on a mapping:
import yaml
from yaml.loader import SafeLoader

a = """\
--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess
code:
- '716'
- '718'
id:
- 488
- 499
"""

def new_constructor(loader, tag_suffix, node):
    if isinstance(node, yaml.nodes.MappingNode):
        return loader.construct_mapping(node, deep=True)
    raise NotImplementedError

yaml.add_multi_constructor('', new_constructor, Loader=SafeLoader)

data = yaml.load(a, Loader=SafeLoader)
print(data)
which gives:
{'code': ['716', '718'], 'id': [488, 499]}
You should not use PyYAML's yaml.load(), it is documented to be potentially unsafe
and above all it is not necessary. Just add the new constructor to the SafeLoader.

Rscript : Why is Error in UseMethod("extract_") : being indicated when attempting to use raster::extract?

I am attempting to use the raster package's extract method to extract values from a Raster* object.
RStudioPrompt> jpnpe <- extract(jpnp, jpnb, fun = mean, na.rm = T)
where jpnp is the raster object and jpnb is SpatialPolygonsDataFrame
However the following error is indicated:
Error in UseMethod("extract_") :
no applicable method for 'extract_' applied to an object of class "c('RasterStack', 'Raster', 'RasterStackBrick', 'BasicRaster')"
How can I get past this error?
The issue may be due to having another package loaded with the same method name, masking the raster extract method.
The tidyr package has an extract method which may conflict with raster's extract method.
Confirm by checking libraries loaded by doing:
>search()
[1] ".GlobalEnv" **"package:tidyr"** "package:dplyr"
[4] "package:rgeos" "package:ggplot2" "package:RColorBrewer"
[7] "package:animation" "package:rgdal" "package:maptools"
[10] **"package:raster"** "package:sp" "tools:rstudio"
[13] "package:stats" "package:graphics" "package:grDevices"
[16] "package:utils" "package:datasets" "package:methods"
[19] "Autoloads" "package:base"
You can also check which extract method is being loaded by typing the name of the function without brackets (as below; the environment will tell you which package is being used):
> extract
function (data, col, into, regex = "([[:alnum:]]+)", remove = TRUE,
convert = FALSE, ...)
{
col <- col_name(substitute(col))
extract_(data, col, into, regex = regex, remove = remove,
convert = convert, ...)
}
<environment: namespace:tidyr>
To resolve the error just unload the offending package, in RStudio you can use the following command:
>.rs.unloadPackage("tidyr")
and re-execute the raster extract method:
>jpnpe <- extract(jpnp, jpnb, fun = mean, na.rm = T)

How can I include a YAML file inside another?

So I have two YAML files, "A" and "B" and I want the contents of A to be inserted inside B, either spliced into the existing data structure, like an array, or as a child of an element, like the value for a certain hash key.
Is this possible at all? How? If not, any pointers to a normative reference?
No, standard YAML does not include any kind of "import" or "include" statement.
Your question does not ask for a Python solution, but here is one using PyYAML.
PyYAML allows you to attach custom constructors (such as !include) to the YAML loader. I've included a root directory that can be set so that this solution supports relative and absolute file references.
Class-Based Solution
Here is a class-based solution, that avoids the global root variable of my original response.
See this gist for a similar, more robust Python 3 solution that uses a metaclass to register the custom constructor.
import yaml
import os

class Loader(yaml.SafeLoader):

    def __init__(self, stream):
        self._root = os.path.split(stream.name)[0]
        super(Loader, self).__init__(stream)

    def include(self, node):
        filename = os.path.join(self._root, self.construct_scalar(node))
        with open(filename, 'r') as f:
            return yaml.load(f, Loader)

Loader.add_constructor('!include', Loader.include)
An example:
foo.yaml
a: 1
b:
- 1.43
- 543.55
c: !include bar.yaml
bar.yaml
- 3.6
- [1, 2, 3]
Now the files can be loaded using:
>>> with open('foo.yaml', 'r') as f:
>>> data = yaml.load(f, Loader)
>>> data
{'a': 1, 'b': [1.43, 543.55], 'c': [3.6, [1, 2, 3]]}
For Python users, you can try pyyaml-include.
Install
pip install pyyaml-include
Usage
import yaml
from yamlinclude import YamlIncludeConstructor

YamlIncludeConstructor.add_to_loader_class(loader_class=yaml.FullLoader, base_dir='/your/conf/dir')

with open('0.yaml') as f:
    data = yaml.load(f, Loader=yaml.FullLoader)

print(data)
Consider we have such YAML files:
├── 0.yaml
└── include.d
├── 1.yaml
└── 2.yaml
1.yaml 's content:
name: "1"
2.yaml 's content:
name: "2"
Include files by name
On top level:
If 0.yaml was:
!include include.d/1.yaml
We'll get:
{"name": "1"}
In mapping:
If 0.yaml was:
file1: !include include.d/1.yaml
file2: !include include.d/2.yaml
We'll get:
file1:
    name: "1"
file2:
    name: "2"
In sequence:
If 0.yaml was:
files:
- !include include.d/1.yaml
- !include include.d/2.yaml
We'll get:
files:
- name: "1"
- name: "2"
ℹ Note:
File name can be either absolute (like /usr/conf/1.5/Make.yml) or relative (like ../../cfg/img.yml).
Include files by wildcards
File name can contain shell-style wildcards. Data loaded from the file(s) found by wildcards will be set in a sequence.
If 0.yaml was:
files: !include include.d/*.yaml
We'll get:
files:
- name: "1"
- name: "2"
ℹ Note:
For Python>=3.5, if recursive argument of !include YAML tag is true, the pattern “**” will match any files and zero or more directories and subdirectories.
Using the “**” pattern in large directory trees may consume an inordinate amount of time because of recursive search.
In order to enable recursive argument, we shall write the !include tag in Mapping or Sequence mode:
Arguments in Sequence mode:
!include [tests/data/include.d/**/*.yaml, true]
Arguments in Mapping mode:
!include {pathname: tests/data/include.d/**/*.yaml, recursive: true}
Includes are not directly supported in YAML as far as I know; you will have to provide a mechanism yourself. However, this is generally easy to do.
I have used YAML as a configuration language in my python apps, and in this case often define a convention like this:
>>> main.yml <<<
includes: [ wibble.yml, wobble.yml]
Then in my (python) code I do:
import yaml

cfg = yaml.load(open("main.yml"))
for inc in cfg.get("includes", []):
    cfg.update(yaml.load(open(inc)))
The only downside is that variables in the includes will always override the variables in main, and there is no way to change that precedence by changing where the "includes:" statement appears in the main.yml file (one way around that is sketched below).
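A minimal sketch of one way to keep main.yml's values winning (my addition, not part of the original answer): load the includes into a separate dict first and overlay the main file last.

import yaml

with open("main.yml") as f:
    cfg = yaml.safe_load(f)

merged = {}
for inc in cfg.get("includes", []):
    with open(inc) as f:
        merged.update(yaml.safe_load(f))

# values from main.yml take precedence over anything pulled in via includes
merged.update(cfg)
cfg = merged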
On a slightly different point, YAML doesn't support includes as it's not really designed exclusively as a file-based markup. What would an include mean if you got it in a response to an AJAX request?
The YAML standard does not specify a way to do this, and the problem is not limited to YAML: JSON has the same limitation.
Many applications which use YAML- or JSON-based configurations run into this problem eventually. When that happens, they make up their own convention.
e.g. for swagger API definitions:
$ref: 'file.yml'
e.g. for docker compose configurations:
services:
  app:
    extends:
      file: docker-compose.base.yml
Alternatively, if you want to split up the content of a YAML file into multiple files, like a tree of content, you can define your own folder-structure convention and use an (existing) merge script; a small sketch of such a script follows.
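As a rough illustration (my addition, assuming a hypothetical conf.d/ folder holding the split-out *.yml fragments), such a merge script can be as small as:

import glob

import yaml

merged = {}
for path in sorted(glob.glob("conf.d/*.yml")):  # hypothetical folder convention
    with open(path) as f:
        merged.update(yaml.safe_load(f) or {})

with open("merged.yml", "w") as f:
    yaml.safe_dump(merged, f)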
Expanding on @Josh_Bode's answer, here's my own PyYAML solution, which has the advantage of being a self-contained subclass of yaml.Loader. It doesn't depend on any module-level globals, or on modifying the global state of the yaml module.
import yaml, os

class IncludeLoader(yaml.Loader):
    """
    yaml.Loader subclass handles "!include path/to/foo.yml" directives in config
    files. When constructed with a file object, the root path for includes
    defaults to the directory containing the file, otherwise to the current
    working directory. In either case, the root path can be overridden by the
    `root` keyword argument.

    When an included file F contain its own !include directive, the path is
    relative to F's location.

    Example:
        YAML file /home/frodo/one-ring.yml:
            ---
            Name: The One Ring
            Specials:
                - resize-to-wearer
            Effects:
                - !include path/to/invisibility.yml

        YAML file /home/frodo/path/to/invisibility.yml:
            ---
            Name: invisibility
            Message: Suddenly you disappear!

        Loading:
            data = IncludeLoader(open('/home/frodo/one-ring.yml', 'r')).get_data()

        Result:
            {'Effects': [{'Message': 'Suddenly you disappear!', 'Name':
                'invisibility'}], 'Name': 'The One Ring', 'Specials':
                ['resize-to-wearer']}
    """
    def __init__(self, *args, **kwargs):
        super(IncludeLoader, self).__init__(*args, **kwargs)
        self.add_constructor('!include', self._include)
        if 'root' in kwargs:
            self.root = kwargs['root']
        elif isinstance(self.stream, file):
            self.root = os.path.dirname(self.stream.name)
        else:
            self.root = os.path.curdir

    def _include(self, loader, node):
        oldRoot = self.root
        filename = os.path.join(self.root, loader.construct_scalar(node))
        self.root = os.path.dirname(filename)
        data = yaml.load(open(filename, 'r'))
        self.root = oldRoot
        return data
With Yglu, you can import other files like this:
A.yaml
foo: !? $import('B.yaml')
B.yaml
bar: Hello
$ yglu A.yaml
foo:
  bar: Hello
As $import is a function, you can also pass an expression as argument:
dep: !- b
foo: !? $import($_.dep.toUpper() + '.yaml')
This would give the same output as above.
Disclaimer: I am the author of Yglu.
Standard YAML 1.2 doesn't natively include this feature. Nevertheless, many implementations provide some extension to do so.
I present a way of achieving it with Java and snakeyaml:1.24 (a Java library to parse/emit YAML files) that allows creating a custom YAML tag to achieve the following goal (you will see I'm using it to load test suites defined in several YAML files, and that I made it work as a list of includes for a target tests: node):
# ... yaml prev stuff
tests: !include
- '1.hello-test-suite.yaml'
- '3.foo-test-suite.yaml'
- '2.bar-test-suite.yaml'
# ... more yaml document
Here is the single Java class that allows processing the !include tag. Files are loaded from the classpath (Maven resources directory):
/**
 * Custom YAML loader. It adds support to the custom !include tag which allows splitting a YAML file across several
 * files for a better organization of YAML tests.
 */
@Slf4j // <-- This is a Lombok annotation to auto-generate logger
public class MyYamlLoader {

    private static final Constructor CUSTOM_CONSTRUCTOR = new MyYamlConstructor();

    private MyYamlLoader() {
    }

    /**
     * Parse the only YAML document in a stream and produce the Java Map. It provides support for the custom !include
     * YAML tag to split YAML contents across several files.
     */
    public static Map<String, Object> load(InputStream inputStream) {
        return new Yaml(CUSTOM_CONSTRUCTOR)
                .load(inputStream);
    }

    /**
     * Custom SnakeYAML constructor that registers custom tags.
     */
    private static class MyYamlConstructor extends Constructor {

        private static final String TAG_INCLUDE = "!include";

        MyYamlConstructor() {
            // Register custom tags
            yamlConstructors.put(new Tag(TAG_INCLUDE), new IncludeConstruct());
        }

        /**
         * The actual include tag construct.
         */
        private static class IncludeConstruct implements Construct {

            @Override
            public Object construct(Node node) {
                List<Node> inclusions = castToSequenceNode(node);
                return parseInclusions(inclusions);
            }

            @Override
            public void construct2ndStep(Node node, Object object) {
                // do nothing
            }

            private List<Node> castToSequenceNode(Node node) {
                try {
                    return ((SequenceNode) node).getValue();
                } catch (ClassCastException e) {
                    throw new IllegalArgumentException(String.format("The !import value must be a sequence node, but " +
                            "'%s' found.", node));
                }
            }

            private Object parseInclusions(List<Node> inclusions) {
                List<InputStream> inputStreams = inputStreams(inclusions);

                try (final SequenceInputStream sequencedInputStream =
                             new SequenceInputStream(Collections.enumeration(inputStreams))) {
                    return new Yaml(CUSTOM_CONSTRUCTOR)
                            .load(sequencedInputStream);
                } catch (IOException e) {
                    log.error("Error closing the stream.", e);
                    return null;
                }
            }

            private List<InputStream> inputStreams(List<Node> scalarNodes) {
                return scalarNodes.stream()
                        .map(this::inputStream)
                        .collect(toList());
            }

            private InputStream inputStream(Node scalarNode) {
                String filePath = castToScalarNode(scalarNode).getValue();
                final InputStream is = getClass().getClassLoader().getResourceAsStream(filePath);
                Assert.notNull(is, String.format("Resource file %s not found.", filePath));
                return is;
            }

            private ScalarNode castToScalarNode(Node scalarNode) {
                try {
                    return ((ScalarNode) scalarNode);
                } catch (ClassCastException e) {
                    throw new IllegalArgumentException(String.format("The value must be a scalar node, but '%s' found" +
                            ".", scalarNode));
                }
            }
        }
    }
}
Unfortunately YAML doesn't provide this in its standard.
But if you are using Ruby, there is a gem providing the functionality you are asking for by extending the ruby YAML library:
https://github.com/entwanderer/yaml_extend
I made some examples for your reference.
import yaml

main_yaml = """
Package:
- !include _shape_yaml
- !include _path_yaml
"""

_shape_yaml = """
# Define
Rectangle: &id_Rectangle
    name: Rectangle
    width: &Rectangle_width 20
    height: &Rectangle_height 10
    area: !product [*Rectangle_width, *Rectangle_height]

Circle: &id_Circle
    name: Circle
    radius: &Circle_radius 5
    area: !product [*Circle_radius, *Circle_radius, pi]

# Setting
Shape:
    property: *id_Rectangle
    color: red
"""

_path_yaml = """
# Define
Root: &BASE /path/src/

Paths:
    a: &id_path_a !join [*BASE, a]
    b: &id_path_b !join [*BASE, b]

# Setting
Path:
    input_file: *id_path_a
"""


# define custom tag handler
def yaml_import(loader, node):
    other_yaml_file = loader.construct_scalar(node)
    return yaml.load(eval(other_yaml_file), Loader=yaml.SafeLoader)


def yaml_product(loader, node):
    import math
    list_data = loader.construct_sequence(node)
    result = 1
    pi = math.pi
    for val in list_data:
        result *= eval(val) if isinstance(val, str) else val
    return result


def yaml_join(loader, node):
    seq = loader.construct_sequence(node)
    return ''.join([str(i) for i in seq])


def yaml_ref(loader, node):
    ref = loader.construct_sequence(node)
    return ref[0]


def yaml_dict_ref(loader: yaml.loader.SafeLoader, node):
    dict_data, key, const_value = loader.construct_sequence(node)
    return dict_data[key] + str(const_value)


def main():
    # register the tag handler
    yaml.SafeLoader.add_constructor(tag='!include', constructor=yaml_import)
    yaml.SafeLoader.add_constructor(tag='!product', constructor=yaml_product)
    yaml.SafeLoader.add_constructor(tag='!join', constructor=yaml_join)
    yaml.SafeLoader.add_constructor(tag='!ref', constructor=yaml_ref)
    yaml.SafeLoader.add_constructor(tag='!dict_ref', constructor=yaml_dict_ref)

    config = yaml.load(main_yaml, Loader=yaml.SafeLoader)

    pk_shape, pk_path = config['Package']
    pk_shape, pk_path = pk_shape['Shape'], pk_path['Path']

    print(f"shape name: {pk_shape['property']['name']}")
    print(f"shape area: {pk_shape['property']['area']}")
    print(f"shape color: {pk_shape['color']}")
    print(f"input file: {pk_path['input_file']}")


if __name__ == '__main__':
    main()
output
shape name: Rectangle
shape area: 200
shape color: red
input file: /path/src/a
Update 2
And you can combine it, like this:
# xxx.yaml
CREATE_FONT_PICTURE:
    PROJECTS:
        SUNG: &id_SUNG
            name: SUNG
            work_dir: SUNG
            output_dir: temp
            font_pixel: 24

    DEFINE: &id_define !ref [*id_SUNG]  # you can use config['CREATE_FONT_PICTURE']['DEFINE'][name, work_dir, ... font_pixel]

    AUTO_INIT:
        basename_suffix: !dict_ref [*id_define, name, !product [5, 3, 2]]  # SUNG30

# ↓ This is not correct.
# basename_suffix: !dict_ref [*id_define, name, !product [5, 3, 2]]  # It will build by Deep-level. id_define is Deep-level: 2. So you must put it after 2. otherwise, it can't refer to the correct value.
With Symfony, its handling of yaml will indirectly allow you to nest yaml files. The trick is to make use of the parameters option. eg:
common.yml
parameters:
    yaml_to_repeat:
        option: "value"
        foo:
            - "bar"
            - "baz"
config.yml
imports:
    - { resource: common.yml }

whatever:
    thing: "%yaml_to_repeat%"
    other_thing: "%yaml_to_repeat%"
The result will be the same as:
whatever:
    thing:
        option: "value"
        foo:
            - "bar"
            - "baz"
    other_thing:
        option: "value"
        foo:
            - "bar"
            - "baz"
I think the solution used by @maxy-B looks great. However, it didn't succeed for me with nested inclusions. For example, if config_1.yaml includes config_2.yaml, which includes config_3.yaml, there was a problem with the loader. However, if you simply point the new loader class to itself on load, it works! Specifically, if we replace the old _include function with the very slightly modified version:
def _include(self, loader, node):
    oldRoot = self.root
    filename = os.path.join(self.root, loader.construct_scalar(node))
    self.root = os.path.dirname(filename)
    data = yaml.load(open(filename, 'r'), Loader=IncludeLoader)
    self.root = oldRoot
    return data
Upon reflection I agree with the other comments, that nested loading is not appropriate for yaml in general as the input stream may not be a file, but it is very useful!
Based on previous posts:
import functools
import os

import yaml


class SimYamlLoader(yaml.SafeLoader):
    '''
    Simple custom yaml loader that supports include, e.g:
    main.yaml:
        - !include file1.yaml
        - !include dir/file2.yaml
    '''
    def __init__(self, stream):
        self.root = os.path.split(stream.name)[0]
        super().__init__(stream)


def _include(loader, node):
    filename = os.path.join(loader.root, loader.construct_scalar(node))
    with open(filename, 'r') as f:
        return yaml.load(f, SimYamlLoader)


SimYamlLoader.add_constructor('!include', _include)

# example:
with open('main.yaml', 'r') as f:
    lists = yaml.load(f, SimYamlLoader)

# if you want to merge the lists
data = functools.reduce(
    lambda x, y: x if y is None else {**x, **dict(y)}, lists, {})
# python 3.10+: lambda x, y: x if y is None else x | dict(y), lists, {})
Maybe this could inspire you; try to align to JJB (Jenkins Job Builder) conventions:
https://docs.openstack.org/infra/jenkins-job-builder/definition.html#inclusion-tags
- job:
    name: test-job-include-raw-1
    builders:
      - shell:
          !include-raw: include-raw001-hello-world.sh
Adding on to @Joshbode's initial answer above, I modified the snippet a little to support UNIX-style wildcard patterns. I haven't tested it in Windows, though.
I was facing an issue of splitting an array in a large YAML file across multiple files for easy maintenance and was looking for a way to reference multiple files within the same array of the base YAML. Hence the solution below. It does not support recursive references; it only supports wildcards at a given directory level referenced in the base YAML.
import yaml
import os
import glob


# Base code taken from below link :-
# Ref: https://stackoverflow.com/a/9577670
class Loader(yaml.SafeLoader):

    def __init__(self, stream):
        self._root = os.path.split(stream.name)[0]
        super(Loader, self).__init__(stream)

    def include(self, node):
        consolidated_result = None
        filename = os.path.join(self._root, self.construct_scalar(node))

        # Below section is modified for supporting UNIX wildcard patterns
        filenames = glob.glob(filename)

        # Just to ensure the order of files considered are predictable
        # and easy to debug in case of errors.
        filenames.sort()

        for file in filenames:
            with open(file, 'r') as f:
                result = yaml.load(f, Loader)
                if isinstance(result, list):
                    if not isinstance(consolidated_result, list):
                        consolidated_result = []
                    consolidated_result += result
                elif isinstance(result, dict):
                    if not isinstance(consolidated_result, dict):
                        consolidated_result = {}
                    consolidated_result.update(result)
                else:
                    consolidated_result = result

        return consolidated_result


Loader.add_constructor('!include', Loader.include)
Usage
a:
    !include a.yaml

b:
    # All yamls included within b folder level will be consolidated
    !include b/*.yaml
Combining other answers, here is a short solution without overloading Loader class and it works with any loader operating on files:
import json
from pathlib import Path
from typing import Any
import yaml
def yaml_include_constructor(loader: yaml.BaseLoader, node: yaml.Node) -> Any:
"""Include file referenced with !include node"""
# noinspection PyTypeChecker
fp = Path(loader.name).parent.joinpath(loader.construct_scalar(node)).resolve()
fe = fp.suffix.lstrip(".")
with open(fp, 'r') as f:
if fe in ("yaml", "yml"):
return yaml.load(f, type(loader))
elif fe in ("json", "jsn"):
return json.load(f)
else:
return f.read()
def main():
loader = yaml.SafeLoader # Works with any loader
loader.add_constructor("!include", yaml_include_constructor)
with open(...) as f:
yml = yaml.load(f, loader)
# noinspection PyTypeChecker is there to prevent PEP-check warning Expected type 'ScalarNode', got 'Node' instead when passing node: yaml.Node to loader.construct_scalar().
This solution fails if the yaml.load input stream is not a file stream, as loader.name does not contain the path in that case:
class Reader(object):
    ...
    def __init__(self, stream):
        ...
        if isinstance(stream, str):
            self.name = "<unicode string>"
            ...
        elif isinstance(stream, bytes):
            self.name = "<byte string>"
            ...
        else:
            self.name = getattr(stream, 'name', "<file>")
            ...
In my use case, I know that only YAML files will be included, so the solution can be simplified further:
def yaml_include_constructor(loader: yaml.Loader, node: yaml.Node) -> Any:
    """Include YAML file referenced with !include node"""
    with open(Path(loader.name).parent.joinpath(loader.construct_yaml_str(node)).resolve(), 'r') as f:
        return yaml.load(f, type(loader))


Loader = yaml.SafeLoader  # Works with any loader
Loader.add_constructor("!include", yaml_include_constructor)


def main():
    with open(...) as f:
        yml = yaml.load(f, Loader=Loader)
or even one-liner using lambda:
Loader = yaml.SafeLoader # Works with any loader
Loader.add_constructor("!include",
lambda l, n: yaml.load(Path(l.name).parent.joinpath(l.construct_scalar(n)).read_text(), type(l)))
Probably it was not supported when the question was asked, but you can import another YAML file into one:
imports: [/your_location_to_yaml_file/Util.area.yaml]
Though I don't have an online reference, this works for me.
