-rw-r--r--  .gitignore  39
-rw-r--r--  .travis.yml  31
-rw-r--r--  LICENSE  22
-rw-r--r--  MANIFEST.in  10
-rw-r--r--  Makefile  47
-rw-r--r--  README.md  105
-rw-r--r--  conftest.py  3
-rw-r--r--  dev-requirements.txt  10
-rw-r--r--  docs/License.txt  22
-rw-r--r--  docs/Makefile  216
-rw-r--r--  docs/authors.rst  26
-rw-r--r--  docs/basics.rst  35
-rw-r--r--  docs/conf.py  283
-rw-r--r--  docs/extensions.rst  105
-rw-r--r--  docs/index.rst  215
-rw-r--r--  docs/license.rst  6
-rw-r--r--  docs/partial-schemas.rst  83
-rw-r--r--  docs/release-notes.rst  326
-rw-r--r--  docs/testing.rst  20
-rw-r--r--  docs/upgrade-instructions.rst  74
-rw-r--r--  docs/validation-rules.rst  646
-rw-r--r--  examples/utf8-data.yml  2
-rw-r--r--  examples/utf8-schema.yml  7
-rw-r--r--  pykwalify/__init__.py  57
-rw-r--r--  pykwalify/cli.py  95
-rw-r--r--  pykwalify/compat.py  56
-rw-r--r--  pykwalify/core.py  978
-rw-r--r--  pykwalify/errors.py  237
-rw-r--r--  pykwalify/rule.py  1358
-rw-r--r--  pykwalify/types.py  160
-rw-r--r--  pytest.ini  5
-rw-r--r--  requirements.txt  3
-rw-r--r--  setup.cfg  5
-rw-r--r--  setup.py  57
-rw-r--r--  tests/__init__.py  3
-rw-r--r--  tests/files/README.md  34
-rw-r--r--  tests/files/cli/1a.yaml  3
-rw-r--r--  tests/files/cli/1b.yaml  3
-rw-r--r--  tests/files/cli/2a.yaml  3
-rw-r--r--  tests/files/cli/2b.yaml  3
-rw-r--r--  tests/files/fail/test_anchor.yaml  95
-rw-r--r--  tests/files/fail/test_assert.yaml  34
-rw-r--r--  tests/files/fail/test_default.yaml  21
-rw-r--r--  tests/files/fail/test_desc.yaml  1
-rw-r--r--  tests/files/fail/test_enum.yaml  16
-rw-r--r--  tests/files/fail/test_example.yaml  1
-rw-r--r--  tests/files/fail/test_extensions.yaml  0
-rw-r--r--  tests/files/fail/test_func.yaml  0
-rw-r--r--  tests/files/fail/test_ident.yaml  23
-rw-r--r--  tests/files/fail/test_include.yaml  0
-rw-r--r--  tests/files/fail/test_length.yaml  113
-rw-r--r--  tests/files/fail/test_mapping.yaml  186
-rw-r--r--  tests/files/fail/test_matching.yaml  0
-rw-r--r--  tests/files/fail/test_merge.yaml  37
-rw-r--r--  tests/files/fail/test_name.yaml  0
-rw-r--r--  tests/files/fail/test_nullable.yaml  19
-rw-r--r--  tests/files/fail/test_pattern.yaml  26
-rw-r--r--  tests/files/fail/test_range.yaml  219
-rw-r--r--  tests/files/fail/test_required.yaml  19
-rw-r--r--  tests/files/fail/test_schema.yaml  0
-rw-r--r--  tests/files/fail/test_sequence.yaml  71
-rw-r--r--  tests/files/fail/test_sequence_multi.yaml  31
-rw-r--r--  tests/files/fail/test_type_any.yaml  1
-rw-r--r--  tests/files/fail/test_type_bool.yaml  52
-rw-r--r--  tests/files/fail/test_type_date.yaml  83
-rw-r--r--  tests/files/fail/test_type_float.yaml  56
-rw-r--r--  tests/files/fail/test_type_int.yaml  45
-rw-r--r--  tests/files/fail/test_type_map.yaml  46
-rw-r--r--  tests/files/fail/test_type_none.yaml  54
-rw-r--r--  tests/files/fail/test_type_number.yaml  83
-rw-r--r--  tests/files/fail/test_type_scalar.yaml  0
-rw-r--r--  tests/files/fail/test_type_seq.yaml  47
-rw-r--r--  tests/files/fail/test_type_str.yaml  73
-rw-r--r--  tests/files/fail/test_type_symbol.yaml  0
-rw-r--r--  tests/files/fail/test_type_text.yaml  70
-rw-r--r--  tests/files/fail/test_type_timestamp.yaml  15
-rw-r--r--  tests/files/fail/test_unique.yaml  109
-rw-r--r--  tests/files/fail/test_version.yaml  0
-rw-r--r--  tests/files/partial_schemas/1f-data.yaml  1
-rw-r--r--  tests/files/partial_schemas/1f-partials.yaml  11
-rw-r--r--  tests/files/partial_schemas/1f-schema.yaml  3
-rw-r--r--  tests/files/partial_schemas/1s-data.yaml  1
-rw-r--r--  tests/files/partial_schemas/1s-partials.yaml  11
-rw-r--r--  tests/files/partial_schemas/1s-schema.yaml  3
-rw-r--r--  tests/files/partial_schemas/2f-data.yaml  1
-rw-r--r--  tests/files/partial_schemas/2f-schema.yaml  5
-rw-r--r--  tests/files/partial_schemas/2s-data.yaml  3
-rw-r--r--  tests/files/partial_schemas/2s-partials.yaml  16
-rw-r--r--  tests/files/partial_schemas/2s-schema.yaml  3
-rw-r--r--  tests/files/partial_schemas/3f-data.yaml  1
-rw-r--r--  tests/files/partial_schemas/3f-schema.yaml  3
-rw-r--r--  tests/files/partial_schemas/4f-data.yaml  2
-rw-r--r--  tests/files/partial_schemas/4f-schema.yaml  20
-rw-r--r--  tests/files/partial_schemas/5f-data.yaml  1
-rw-r--r--  tests/files/partial_schemas/5f-schema.yaml  18
-rw-r--r--  tests/files/partial_schemas/6f-data.yaml  4
-rw-r--r--  tests/files/partial_schemas/6f-schema.yaml  22
-rw-r--r--  tests/files/partial_schemas/7s-data.yaml  5
-rw-r--r--  tests/files/partial_schemas/7s-schema.yaml  12
-rw-r--r--  tests/files/success/test_anchor.yaml  92
-rw-r--r--  tests/files/success/test_assert.yaml  28
-rw-r--r--  tests/files/success/test_default.yaml  0
-rw-r--r--  tests/files/success/test_desc.yaml  7
-rw-r--r--  tests/files/success/test_enum.yaml  12
-rw-r--r--  tests/files/success/test_example.yaml  5
-rw-r--r--  tests/files/success/test_extensions.yaml  0
-rw-r--r--  tests/files/success/test_func.yaml  0
-rw-r--r--  tests/files/success/test_ident.yaml  22
-rw-r--r--  tests/files/success/test_include.yaml  0
-rw-r--r--  tests/files/success/test_length.yaml  98
-rw-r--r--  tests/files/success/test_mapping.yaml  308
-rw-r--r--  tests/files/success/test_matching.yaml  0
-rw-r--r--  tests/files/success/test_merge.yaml  36
-rw-r--r--  tests/files/success/test_name.yaml  0
-rw-r--r--  tests/files/success/test_nullable.yaml  11
-rw-r--r--  tests/files/success/test_pattern.yaml  18
-rw-r--r--  tests/files/success/test_range.yaml  166
-rw-r--r--  tests/files/success/test_required.yaml  15
-rw-r--r--  tests/files/success/test_schema.yaml  0
-rw-r--r--  tests/files/success/test_sequence.yaml  44
-rw-r--r--  tests/files/success/test_sequence_multi.yaml  64
-rw-r--r--  tests/files/success/test_type_any.yaml  27
-rw-r--r--  tests/files/success/test_type_bool.yaml  39
-rw-r--r--  tests/files/success/test_type_date.yaml  40
-rw-r--r--  tests/files/success/test_type_enum.yaml  43
-rw-r--r--  tests/files/success/test_type_float.yaml  43
-rw-r--r--  tests/files/success/test_type_int.yaml  38
-rw-r--r--  tests/files/success/test_type_map.yaml  39
-rw-r--r--  tests/files/success/test_type_none.yaml  47
-rw-r--r--  tests/files/success/test_type_number.yaml  76
-rw-r--r--  tests/files/success/test_type_scalar.yaml  76
-rw-r--r--  tests/files/success/test_type_seq.yaml  36
-rw-r--r--  tests/files/success/test_type_str.yaml  56
-rw-r--r--  tests/files/success/test_type_symbol.yaml  0
-rw-r--r--  tests/files/success/test_type_text.yaml  75
-rw-r--r--  tests/files/success/test_type_timestamp.yaml  37
-rw-r--r--  tests/files/success/test_unique.yaml  138
-rw-r--r--  tests/files/success/test_version.yaml  5
-rw-r--r--  tests/files/unicode/1f.yaml  13
-rw-r--r--  tests/files/unicode/1s.yaml  10
-rw-r--r--  tests/files/unicode/3f.yaml  10
-rw-r--r--  tests/files/unicode/3s.yaml  7
-rw-r--r--  tests/test_cli.py  62
-rw-r--r--  tests/test_core.py  572
-rw-r--r--  tests/test_core_methods.py  308
-rw-r--r--  tests/test_exceptions.py  29
-rw-r--r--  tests/test_helper.py  33
-rw-r--r--  tests/test_rule.py  399
-rw-r--r--  tests/test_types.py  75
-rw-r--r--  tests/test_unicode.py  137
-rw-r--r--  tox.ini  16
151 files changed, 10597 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..77ce730
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,39 @@
+*.py[cod]
+
+# C extensions
+*.so
+
+# Packages
+*.egg
+*.egg-info
+docs/_build
+docs/_build_html
+.cache
+dist
+build
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+#lib
+lib64
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage*
+.tox
+nosetests.xml
+htmlcov
+
+# Translations
+*.mo
+
+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..b25de8e
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,31 @@
+sudo: false
+language: python
+python:
+ - "2.7"
+ - "3.5"
+ - "3.5"
+ - "3.6"
+ - "3.7-dev"
+ - "nightly"
+
+install:
+ - pip install -r dev-requirements.txt
+
+script:
+ - "if [[ $RUAMEL == '1' ]]; then pip install ruamel.yaml; fi"
+ - coverage erase
+ - coverage run --source pykwalify -p -m py.test -v
+ - flake8 --max-line-length=160 --show-source --statistics --exclude=.venv,.tox,dist,docs,build,.git
+ - python setup.py sdist bdist
+
+env:
+ - RUAMEL=0
+ - RUAMEL=1
+
+after_success:
+ - coverage combine
+ - coveralls
+
+matrix:
+ allow_failures:
+ - python: "nightly"
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..6c3786a
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013-2018 Johan Andersson
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..41f4c53
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,10 @@
+exclude *.py
+include docs/authors.rst
+include docs/license.rst
+include docs/release-notes.rst
+include setup.py
+include README.md
+include LICENSE
+global-exclude __pycache__/*
+recursive-include tests *
+recursive-exclude tests *.py[co]
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..2a76f88
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,47 @@
+help:
+ @echo "Please use 'make <target>' where <target> is one of"
+ @echo " clean remove temporary files created by build tools"
+ @echo " cleanegg remove temporary files created by build tools"
+ @echo " cleanpy remove temporary python files"
+ @echo " cleancov remove files used and generated by coverage tools"
+ @echo " cleanall all the above + tmp files from development tools (Not cleantox)"
+ @echo " cleantox remove files created by tox"
+ @echo " test run test suite"
+ @echo " sdist make a source distribution"
+ @echo " install install package"
+
+clean:
+ -rm -f MANIFEST
+ -rm -rf dist/
+ -rm -rf build/
+
+cleantox:
+ -rm -rf .tox/
+
+cleancov:
+ coverage combine
+ coverage erase
+ -rm -rf htmlcov/
+
+cleanegg:
+ -rm -rf pykwalify.egg-info/
+
+cleanpy:
+ -find . -type f -name "*~" -exec rm -f "{}" \;
+ -find . -type f -name "*.orig" -exec rm -f "{}" \;
+ -find . -type f -name "*.rej" -exec rm -f "{}" \;
+ -find . -type f -name "*.pyc" -exec rm -f "{}" \;
+ -find . -type f -name "*.parse-index" -exec rm -f "{}" \;
+ -find . -type d -name "__pycache__" -exec rm -rf "{}" \;
+
+cleanall: clean cleanegg cleanpy cleancov
+
+test:
+ coverage erase
+ coverage run --source pykwalify/ -m py.test
+
+sdist:
+ python setup.py sdist
+
+install:
+ python setup.py install
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..967ecff
--- /dev/null
+++ b/README.md
@@ -0,0 +1,105 @@
+# pyKwalify
+
+YAML/JSON validation library
+
+This framework is a port, with a lot of added functionality, of the Java version of the kwalify framework that can be found at http://www.kuwata-lab.com/kwalify/
+
+The original source code can be found at http://sourceforge.net/projects/kwalify/files/kwalify-java/0.5.1/
+
+The source code of the latest release that has been used can be found at https://github.com/sunaku/kwalify. Please note that this source code is not the original author's code but a fork/upload of the last release available in Ruby.
+
+The schema this library is based on and extended from: http://www.kuwata-lab.com/kwalify/ruby/users-guide.01.html#schema
+
+
+# Usage
+
+Create a data file. `Json` and `Yaml` formats are both supported.
+
+```yaml
+- foo
+- bar
+```
+
+Create a schema file with validation rules.
+
+```yaml
+type: seq
+sequence:
+ - type: str
+```
+
+Run validation from cli.
+
+```bash
+pykwalify -d data.yaml -s schema.yaml
+```
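+
+Or run the validation directly from Python code, using the same call that `docs/basics.rst` shows:
+
+```python
+from pykwalify.core import Core
+
+c = Core(source_file="data.yaml", schema_files=["schema.yaml"])
+c.validate(raise_exception=True)
+```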
+
+
+## Examples
+
+The documentation describes in detail how each keyword and type works and what is possible in each case.
+
+But there are a lot of real-world examples that can be found in the test data/files. They show how all keywords and types work in practice and in combination with each other.
+
+The files can be found here and show both schema/data combinations that will work and that will fail.
+
+ - `tests/files/success/`
+ - `tests/files/fail/`
+ - `tests/files/partial_schemas/`
+
+
+# PyYaml and ruamel.yaml
+
+`PyYaml` is the YAML parser installed by default, and `ruamel.yaml` can be installed alongside it with the following command
+
+```bash
+pip install 'pykwalify[ruamel]'
+
+# or for development:
+
+pip install -e '.[ruamel]'
+```
+
+`ruamel.yaml` will however be used if both are installed because it is more up to date and includes the YAML 1.2 specification that `PyYaml` does not support.
+
+`PyYaml` will still be the default parser because it is used more and is still considered the default `YAML` parser in the Python world.
+
+Depending on how both libraries are developed, this can change in the future in any major update.
+
+
+
+## UTF-8 and data encoding
+
+If you have problems with unicode values not working properly when running pykwalify on Python 2.7.x then try to add this environment variable to your execution:
+
+```
+PYTHONIOENCODING=UTF-8 pykwalify ...
+```
+
+and it might help to force UTF-8 encoding on all string objects. If this does not work please open up an issue with your schema and data that can be used to track down the problem in the source code.
+
+
+# Project details
+
+| | |
+|---|---|
+| python support | 2.7, 3.5, 3.6, 3.7 |
+| Source | https://github.com/Grokzen/pykwalify |
+| Docs (Latest release) | http://pykwalify.readthedocs.io/en/master/ |
+| Docs (Unstable branch) | http://pykwalify.readthedocs.io/en/unstable/ |
+| Gitter (Free Chat) | [![Gitter](https://badges.gitter.im/Join Chat.svg)](https://gitter.im/Grokzen/pykwalify?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) |
+| Changelog | https://github.com/Grokzen/pykwalify/blob/unstable/docs/release-notes.rst |
+| Upgrade instructions | https://github.com/Grokzen/pykwalify/blob/unstable/docs/upgrade-instructions.rst |
+| Issues | https://github.com/Grokzen/pykwalify/issues |
+| Travis (master) | [![Build Status](https://travis-ci.org/Grokzen/pykwalify.svg?branch=master)](https://travis-ci.org/Grokzen/pykwalify) https://travis-ci.org/Grokzen/pykwalify |
+| Travis (unstable) | [![Build Status](https://travis-ci.org/Grokzen/pykwalify.svg?branch=unstable)](https://travis-ci.org/Grokzen/pykwalify) https://travis-ci.org/Grokzen/pykwalify |
+| Test coverage | [![Coverage Status](https://coveralls.io/repos/Grokzen/pykwalify/badge.png?branch=master)](https://coveralls.io/r/Grokzen/pykwalify) https://coveralls.io/github/Grokzen/pykwalify |
+| pypi | https://pypi.python.org/pypi/pykwalify/ |
+| Open Hub | https://www.openhub.net/p/pykwalify |
+| License | `MIT` https://github.com/Grokzen/pykwalify/blob/unstable/docs/license.rst |
+| Copyright | `Copyright (c) 2013-2017 Johan Andersson` |
+| git repo | `git clone git@github.com:Grokzen/pykwalify.git` |
+| install stable | `pip install pykwalify` |
+| install dev | `$ git clone git@github.com:Grokzen/pykwalify.git pykwalify`<br>`$ cd ./pykwalify`<br>`$ virtualenv .venv`<br>`$ source .venv/bin/activate`<br>`$ pip install -r dev-requirements.txt`<br>`$ pip install -e .` |
+| required dependencies | `docopt >= 0.6.2`<br> `python-dateutil >= 2.4.2` |
+| supported yml parsers | `PyYaml >= 3.11`<br>`ruamel.yaml >= 0.11.0` |
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 0000000..5fbd0b6
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,3 @@
+# coding=utf-8
+"""
+"""
diff --git a/dev-requirements.txt b/dev-requirements.txt
new file mode 100644
index 0000000..be6279a
--- /dev/null
+++ b/dev-requirements.txt
@@ -0,0 +1,10 @@
+-r requirements.txt
+
+testfixtures
+pytest>=3.6.0
+tox
+coveralls
+flake8
+ptpdb
+ptpython
+setuptools
diff --git a/docs/License.txt b/docs/License.txt
new file mode 100644
index 0000000..6c3786a
--- /dev/null
+++ b/docs/License.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2013-2018 Johan Andersson
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..0d929e7
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,216 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " applehelp to make an Apple Help Book"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run coverage check of the documentation (if enabled)"
+
+.PHONY: clean
+clean:
+ rm -rf $(BUILDDIR)/*
+
+.PHONY: html
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+.PHONY: dirhtml
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+.PHONY: singlehtml
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+.PHONY: pickle
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+.PHONY: json
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+.PHONY: htmlhelp
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+.PHONY: qthelp
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pykwalify.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pykwalify.qhc"
+
+.PHONY: applehelp
+applehelp:
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+.PHONY: devhelp
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/pykwalify"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pykwalify"
+ @echo "# devhelp"
+
+.PHONY: epub
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+.PHONY: latex
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+.PHONY: latexpdf
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+.PHONY: latexpdfja
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+.PHONY: text
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+.PHONY: man
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+.PHONY: texinfo
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+.PHONY: info
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+.PHONY: gettext
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+.PHONY: changes
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+.PHONY: linkcheck
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+.PHONY: doctest
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+.PHONY: coverage
+coverage:
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILDDIR)/coverage/python.txt."
+
+.PHONY: xml
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+.PHONY: pseudoxml
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/docs/authors.rst b/docs/authors.rst
new file mode 100644
index 0000000..c573ad6
--- /dev/null
+++ b/docs/authors.rst
@@ -0,0 +1,26 @@
+Authors
+=======
+
+
+Code
+----
+
+ - Grokzen (https://github.com/Grokzen)
+ - Markbaas (https://github.com/markbaas)
+ - Gonditeniz (https://github.com/gonditeniz)
+ - Comagnaw (https://github.com/comagnaw)
+ - Cogwirrel (https://github.com/cogwirrel)
+
+
+Testing
+-------
+
+ - Glenn Schmottlach (https://github.com/gschmottlach-xse)
+
+
+
+Documentation
+-------------
+
+ - Grokzen (https://github.com/Grokzen)
+ - Scott Lowe (https://github.com/scottclowe)
diff --git a/docs/basics.rst b/docs/basics.rst
new file mode 100644
index 0000000..b2bd878
--- /dev/null
+++ b/docs/basics.rst
@@ -0,0 +1,35 @@
+Basic Usage
+===========
+
+Create a data ``json`` or ``yaml`` file.
+
+.. code-block:: yaml
+
+ # Data file (data.yaml)
+ - foo
+ - bar
+
+Create a schema file with validation rules.
+
+.. code-block:: yaml
+
+ # Schema file (schema.yaml)
+ type: seq
+ sequence:
+ - type: str
+
+Run validation from cli.
+
+.. code-block:: bash
+
+ pykwalify -d data.yaml -s schema.yaml
+
+Or if you want to run the validation from inside your code directly.
+
+.. code-block:: python
+
+ from pykwalify.core import Core
+ c = Core(source_file="data.yaml", schema_files=["schema.yaml"])
+ c.validate(raise_exception=True)
+
+If validation fails, an exception will be raised.
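+
+A minimal sketch of handling the failure programmatically, assuming here that the validation failure surfaces as ``SchemaError`` from ``pykwalify.errors``:
+
+.. code-block:: python
+
+    from pykwalify.core import Core
+    from pykwalify.errors import SchemaError
+
+    c = Core(source_file="data.yaml", schema_files=["schema.yaml"])
+    try:
+        c.validate(raise_exception=True)
+    except SchemaError as e:
+        # The exception message lists the validation errors that were found
+        print("Validation failed: {0}".format(e))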
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..be89c79
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,283 @@
+# -*- coding: utf-8 -*-
+#
+# pykwalify documentation build configuration file, created by
+# sphinx-quickstart on Sun Mar 6 16:03:21 2016.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'pykwalify'
+copyright = u'2013 - 2016, Johan Andersson'
+author = u'Johan Andersson'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = u'1.7.0'
+# The full version, including alpha/beta/rc tags.
+release = u'1.7.0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (relative to this directory) to use as a favicon of
+# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'pykwalifydoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'pykwalify.tex', u'pykwalify Documentation',
+ u'Johan Andersson', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'pykwalify', u'pykwalify Documentation',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'pykwalify', u'pykwalify Documentation',
+ author, 'pykwalify', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
diff --git a/docs/extensions.rst b/docs/extensions.rst
new file mode 100644
index 0000000..24dee64
--- /dev/null
+++ b/docs/extensions.rst
@@ -0,0 +1,105 @@
+Extensions
+==========
+
+It is possible to extend the validation of each of the three basic types, ``map`` & ``seq`` & ``scalar``.
+
+Extensions can be used to do more complex validation that is not natively supported by the core pykwalify lib.
+
+
+
+Loading extensions
+------------------
+
+There are 2 ways to load extensions into a schema.
+
+First, you can specify any ``*.py`` file via the cli with the ``-e FILE`` or ``--extension FILE`` flag. When using pykwalify as a library, you instead pass a list of files in the ``extensions`` argument to the ``Core`` class, as shown in the sketch below.
+
+The second way is to specify a list of files in the ``extensions`` keyword, which can only be specified at the top level of the schema. The file paths can be either relative or absolute.
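+
+A minimal sketch of the library route (the file names here are only examples):
+
+.. code-block:: python
+
+    from pykwalify.core import Core
+
+    # Equivalent to: pykwalify -d data.yaml -s schema.yaml -e ext.py
+    c = Core(
+        source_file="data.yaml",
+        schema_files=["schema.yaml"],
+        extensions=["ext.py"],
+    )
+    c.validate()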
+
+
+
+How custom validation works
+---------------------------
+
+Each function defined inside the extension must have a globally unique method name and must accept the following arguments
+
+.. code-block:: python
+
+    def method_name(value, rule_obj, path):
+        pass
+
+To raise a validation error you can either raise any exception, which will propagate up to the caller, or you can return ``True`` or ``False``. Any value/object that is interpreted as ``False`` in an if check will cause a ``CoreError`` validation error to be raised.
+
+When using a validation function on a ``sequence``, the method will be called with the entire list content as the value.
+
+When using a validation function on a ``mapping``, the method will be called with the entire dict content as the value.
+
+When using a validation function on any ``scalar`` type value, the method will be called with the scalar value.
+
+This is an example of how to use extensions inside a simple schema
+
+.. code-block:: yaml
+
+    # Schema
+    extensions:
+        - ext.py
+    type: map
+    func: ext_map
+    mapping:
+        foo:
+            type: seq
+            func: ext_list
+            sequence:
+                - type: str
+                  func: ext_str
+
+.. code-block:: yaml
+
+    # Data
+    foo:
+        - foo
+        - bar
+
+This is the extension file named ``ext.py`` that is located in the same directory as the schema file.
+
+.. code-block:: python
+
+    # -*- coding: utf-8 -*-
+    import logging
+    log = logging.getLogger(__name__)
+
+
+    def ext_str(value, rule_obj, path):
+        log.debug("value: %s", value)
+        log.debug("rule_obj: %s", rule_obj)
+        log.debug("path: %s", path)
+
+        # Either raise some exception that you have defined yourself
+        # raise AssertionError('Custom assertion error in ext_str()')
+
+        # Or return True/False to tell if it validated
+        return True
+
+
+    def ext_list(value, rule_obj, path):
+        log.debug("value: %s", value)
+        log.debug("rule_obj: %s", rule_obj)
+        log.debug("path: %s", path)
+
+        # Either raise some exception that you have defined yourself
+        # raise AssertionError('Custom assertion error in ext_list()')
+
+        # Or return True/False to tell if it validated
+        return True
+
+
+    def ext_map(value, rule_obj, path):
+        log.debug("value: %s", value)
+        log.debug("rule_obj: %s", rule_obj)
+        log.debug("path: %s", path)
+
+        # Either raise some exception that you have defined yourself
+        # raise AssertionError('Custom assertion error in ext_map()')
+
+        # Or return True/False to tell if it validated
+        return True
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..feb31c1
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,215 @@
+.. pykwalify documentation master file, created by
+ sphinx-quickstart on Sun Mar 6 16:03:21 2016.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to pykwalify's documentation!
+=====================================
+
+PyKwalify is an open source port of the kwalify lib and specification. The original source code was written in Java, but this port is written in Python. The code is open source, and `available on github`_.
+
+.. _available on github: http://github.com/grokzen/pykwalify
+
+
+YAML/JSON validation library
+
+This framework is a port, with a lot of added functionality, of the Java version of the kwalify framework that can be found at: http://www.kuwata-lab.com/kwalify/
+
+The source code can be found at: http://sourceforge.net/projects/kwalify/files/kwalify-java/0.5.1/
+
+The schema this library is based on and extended from: http://www.kuwata-lab.com/kwalify/ruby/users-guide.01.html#schema
+
+
+
+Usage
+-----
+
+Create a data file. `Json` and `Yaml` formats are both supported.
+
+.. code-block:: yaml
+
+ - foo
+ - bar
+
+Create a schema file with validation rules.
+
+.. code-block:: yaml
+
+ type: seq
+ sequence:
+ - type: str
+
+Run validation from cli.
+
+.. code-block:: bash
+
+ pykwalify -d data.yaml -s schema.yaml
+
+
+
+Examples
+--------
+
+The documentation describes in detail how each keyword and type works and what is possible in each case.
+
+But there are a lot of real-world examples that can be found in the test data/files. They show how all keywords and types work in practice and in combination with each other.
+
+The files can be found here and show both schema/data combinations that will work and that will fail.
+
+ - `tests/files/success/`
+ - `tests/files/fail/`
+ - `tests/files/partial_schemas/`
+
+
+
+PyYaml and ruamel.yaml
+----------------------
+
+``PyYaml`` is the YAML parser installed by default, and ``ruamel.yaml`` can be installed alongside it with the following command
+
+.. code-block:: bash
+
+ pip install 'pykwalify[ruamel]'
+
+ # or for development:
+
+ pip install -e '.[ruamel]'
+
+``ruamel.yaml`` will however be used if both are installed because it is more up to date and includes the YAML 1.2 specification that ``PyYaml`` does not support.
+
+``PyYaml`` will still be the default parser because it is used more and is still considered the default ``YAML`` parser in the Python world.
+
+Depending on how both libraries are developed, this can change in the future in any major update.
+
+
+
+UTF-8 and data encoding
+-----------------------
+
+If you have problems with unicode values not working properly when running pykwalify on Python 2.7.x, then try to add
+this environment variable to your execution; it might help to force UTF-8 encoding on all string objects.
+
+If this does not work, please open up an issue with your schema and data that can be used to track down the problem in the source code.
+
+.. code-block:: bash
+
+ PYTHONIOENCODING=UTF-8 pykwalify ...
+
+
+
+Project details
+---------------
+
+.. |travis-master| image:: https://travis-ci.org/Grokzen/pykwalify.svg?branch=master
+ :target: https://travis-ci.org/Grokzen/pykwalify.svg?branch=master
+
+.. |travis-unstable| image:: https://travis-ci.org/Grokzen/pykwalify.svg?branch=unstable
+ :target: https://travis-ci.org/Grokzen/pykwalify.svg?branch=unstable
+
+.. |gitter-badge| image:: https://badges.gitter.im/Join Chat.svg
+ :target: https://gitter.im/Grokzen/pykwalify?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
+
+.. |coveralls-badge| image:: https://coveralls.io/repos/github/Grokzen/pykwalify/badge.svg?branch=unstable
+ :target: https://coveralls.io/repos/github/Grokzen/pykwalify/badge.svg?branch=unstable
+
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| python support | 2.7, 3.3, 3.4, 3.5, 3.6, 3.7 |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Source | https://github.com/Grokzen/pykwalify |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Docs (Latest release) | http://pykwalify.readthedocs.io/en/master/ |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Docs (Unstable branch)| http://pykwalify.readthedocs.io/en/unstable/ |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Gitter (Free Chat) | |gitter-badge| |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Changelog | https://github.com/Grokzen/pykwalify/blob/unstable/docs/release-notes.rst |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Upgrade instructions | https://github.com/Grokzen/pykwalify/blob/unstable/docs/upgrade-instructions.rst |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Issues | https://github.com/Grokzen/pykwalify/issues |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Travis (master) | |travis-master| |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Travis (unstable) | |travis-unstable| |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Test coverage | |coveralls-badge| |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| pypi | https://pypi.python.org/pypi/pykwalify/ |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Open Hub | https://www.openhub.net/p/pykwalify |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| License | MIT https://github.com/Grokzen/pykwalify/blob/unstable/docs/license.rst |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Copyright | Copyright (c) 2013-2017 Johan Andersson |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| git repo | git clone git@github.com:Grokzen/pykwalify.git |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| install stable | pip install pykwalify |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| install dev | .. code-block:: bash |
+| | |
+| | $ git clone git@github.com:Grokzen/pykwalify.git pykwalify |
+| | $ cd ./pykwalify |
+| | $ virtualenv .venv |
+| | $ source .venv/bin/activate |
+| | $ pip install -r dev-requirements.txt |
+| | $ pip install -e . |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| required dependencies | | docopt >= 0.6.2 |
+| | | python-dateutil >= 2.4.2 |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| supported yml parsers | | PyYaml >= 3.11 |
+| | | ruamel.yaml >= 0.11.0 |
++-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+
+
+The Usage Guide
+---------------
+
+.. _validation-rules-docs:
+
+.. toctree::
+ :maxdepth: 2
+ :glob:
+
+ basics
+ validation-rules
+
+
+
+.. _partial-schemas-docs:
+
+.. toctree::
+ :maxdepth: 2
+ :glob:
+
+ partial-schemas
+
+
+
+.. _extensions-docs:
+
+.. toctree::
+ :maxdepth: 2
+ :glob:
+
+ extensions
+
+
+
+The Community Guide
+--------------------
+
+.. _community-guide:
+
+.. toctree::
+ :maxdepth: 1
+ :glob:
+
+ testing
+ upgrade-instructions
+ release-notes
+ authors
+ license
diff --git a/docs/license.rst b/docs/license.rst
new file mode 100644
index 0000000..bcea826
--- /dev/null
+++ b/docs/license.rst
@@ -0,0 +1,6 @@
+Licensing
+---------
+
+MIT, See docs/License.txt for details
+
+Copyright (c) 2013-2017 Johan Andersson
diff --git a/docs/partial-schemas.rst b/docs/partial-schemas.rst
new file mode 100644
index 0000000..edcd2a5
--- /dev/null
+++ b/docs/partial-schemas.rst
@@ -0,0 +1,83 @@
+Partial schemas
+===============
+
+It is possible to create small partial schemas that can be included in other schemas.
+
+This feature does not use any built-in ``YAML`` or ``JSON`` linking.
+
+To define a partial schema, use the keyword ``schema;(schema-id):``. The ``(schema-id)`` name must be globally unique. If a collision is detected, an error will be raised.
+
+To use a partial schema, use the keyword ``include: (schema-id)``. This will work at any place where you can specify the keyword ``type``. The include directive does not currently work inside a partial schema.
+
+It is possible to define any number of partial schemas in any schema file as long as they are defined at the top level of the schema.
+
+For example, this schema contains one partial and the regular schema.
+
+.. code-block:: yaml
+
+    # Schema
+    schema;map_str:
+        type: map
+        mapping:
+            foo:
+                type: str
+
+    type: seq
+    sequence:
+        - include: map_str
+
+.. code-block:: yaml
+
+    # Data
+    - foo: opa
+
+
+
+schema;(schema-name)
+--------------------
+
+See the ``Partial schemas`` section for details.
+
+Names must be globally unique.
+
+Example
+
+.. code-block:: yaml
+
+    # Schema
+    schema;list_str:
+        type: seq
+        sequence:
+            - type: str
+
+    schema;list_int:
+        type: seq
+        sequence:
+            - type: int
+
+
+
+Include
+-------
+
+Used in the ``partial schema`` system. Includes are lazy and are loaded during parsing / validation.
+
+Example
+
+.. code-block:: yaml
+
+    # Schema [barfoo.yaml]
+    schema;list_str:
+        type: seq
+        sequence:
+            - type: str
+
+.. code-block:: yaml
+
+    # Schema [foobar.yaml]
+    include: list_str
+
+.. code-block:: yaml
+
+    # Data
+    - foobar
diff --git a/docs/release-notes.rst b/docs/release-notes.rst
new file mode 100644
index 0000000..8022b1b
--- /dev/null
+++ b/docs/release-notes.rst
@@ -0,0 +1,326 @@
+Release Notes
+=============
+
+1.7.0 (October 3, 2018)
+-----------------------
+
+**IMPORTANT** LICENSE UPDATE **IMPORTANT**
+
+- In this release there was a line removed from the license file. It was modified in the following commit
+ (cc4e31b39ff4cce2dfbc662baa6469470dd3c033 Wed Oct 3 16:20:59 2018 +0200) and will be the main reason for
+ the 1.7.0 release. All commits and tags and releases (1.6.1 and all releases before it) prior to this commit will
+ use the old license that includes the change done in the above mentioned commit. Only release 1.7.0 and commits past
+ this point will use the new/updated license file.
+
+General changes:
+
+- Dropped support for python 3.3 and 3.4
+
+
+1.6.1 (March 13, 2018)
+----------------------
+
+New keywords:
+
+- Added support for keyword *nullable*. It is now possible to allow for a key not to be empty, when the *required* keyword is not used.
+- Added support for keyword *class*. It will not cause any validation errors, but serves to make kwalify schemas that use that keyword compatible.
+
+Bug fixes:
+
+- Improved compatibility so that unicode values validate as strings.
+
+Changed behaviour:
+
+- Propagate json and yaml loading errors to the user when running from the cli, for easier debugging.
+
+General changes:
+
+- Allow ruamel.yaml versions up to 0.16
+- License is now bundled with the built release.
+
+
+1.6.0 (Jan 22, 2017)
+--------------------
+
+New keywords:
+
+- Add support for keyword *example*. It does nothing and has no validation done on it.
+- Add support for keyword *version*. It does nothing and has no validation done on it.
+- Add support for keyword *date* and the supporting keyword *format*. These can be used to validate many different types of *datetime* objects.
+- Add support for keyword *length*. It is very similar to *range* but works primarily on string types.
+- Add support for keyword *assert*. It works by running the python code *assert <assert-expr>* and checking if any exception is raised.
+ This feature is considered dangerous because there is only simple logic to prevent escaping out from validation.
+
+Bug fixes:
+
+- Fixed a bug where regexes marked as 'required' in a map were matched as strings, rather than regexes.
+- Fixed a bug where the type validation did not work when the schema specified a sequence of map objects. It now outputs a proper `...is not a dict...` error instead.
+- Fixed a bug in *unique* validation when a key that it tried to look up in the data did not exist.
+ Now it just ignores that it did not find any value because a missing value does not impact validation.
+- Fixed a bug with keyword *ident* when the rule value was verified to be a *boolean*. It now only accepts *boolean* values as expected.
+- Fixed a bug where if *allowempty* was specified in a mapping type inside a sequence type then it would not properly validate.
+- Fixed a bug where loaded extensions would not always work in complex & nested objects.
+- Fixed a major bug in very nested *seq* schemas where if the schema expected another *seq* but the value was something else it would not raise it as a validation error.
+ This has now been fixed and now raises the proper error.
+- Fixed a bug where include directive would not work in all cases when used inside a key in a mapping block.
+
+New features:
+
+- It is now possible to specify a default rule when using a mapping.
+ The rule will be used whenever no other key could be found.
+ This is a port of a missing feature from original kwalify implementation.
+- Added new helper method *keywords* to *Rule* class that can output all active keywords for any *Rule* object.
+ This helps when debugging code, to be able to easily dump all active keywords for any *Rule* object.
+- Added new cli flag *--strict-rule-validation* that will validate that all used keywords in all *Rule* objects only use the rules that are supported by each defined type.
+ If you only use a *Core* object then set *strict_rule_validation=True* when creating the *Core* object instance.
+ This feature is opt-in in this release but will be mandatory in *releases >= 1.7.0*.
+- Added new cli flag *--fix-ruby-style-regex* that will trim slashes from ruby style regex/patterns.
+ When using this flag the first and last */* will be trimmed of the pattern before running validation.
+ If you only use a *Core* object then set *fix_ruby_style_regex=True* when creating the *Core* object instance.
+ Default behaviour will still be that you should use python style regex values but this flag can help in some cases when you can't change the schema.
+- Added new cli flag *--allow-assertions* that will enable the otherwise blocked keyword *assert*.
+ This flag was introduced so that pykwalify would not assert assertions without user controll.
+ Default behaviour will be to raise a *CoreError* is assertion is used but not allowed explicitly.
+ If you only use a *Core* object then set *allow_assertions=True* when creating the *Core* object instance.
+
+Changed behaviour:
+
+- Removed the forced *UTF-8* encoding when importing the pykwalify package. It caused issues with *jupyter notebooks* on python 2.7.x.
+ Added documentation in the Readme suggesting *PYTHONIOENCODING=UTF-8* as a solution if the default behaviour does not work.
+- Validation no longer continues to process things like *pattern*, *regex*, *timestamp*, *range* and other additional checks
+ if the type check fails. This can cause previously reported errors to be silenced while the type check for
+ that value fails, and to reappear once the type check is fixed. (sbrunner)
+- Catches *TypeError* when doing regex validation. That happens when the value is not a parsable string type.
+- Checking that the value is a valid dict object is now done even if the mapping keyword is not specified in the schema.
+ This makes the check more eager, and errors can appear that previously were not there.
+- Changed the default type, used when the *type* key is not defined, to *str*. Before this, type had to be defined every time and the default type did not work as expected.
+ This is a major change and can cause validation to either fail or to stop failing depending on the case.
+- Changed validation for when a value is required and a value in, for example, a list is *None*. It now adds a normal validation error instead of raising a *CoreError*.
+- Value for keyword *desc* now *MUST* be a string or a *RuleError* will be raised.
+- Value for keyword *example* now *MUST* be a string or a *RuleError* will be raised.
+- Value for keyword *name* now *MUST* be a string or a *RuleError* will be raised.
+
+General changes:
+
+- Ported a lot of test cases directly from *Kwalify* test data (*test-validator.yaml -> 30f.yaml & 43s.yaml*) so that this lib can have greater confidence that rules are implemented in the same way as *Kwalify*.
+- Refactored the *test_core_files* method to accept test files with multiple documents. The method now tries to read all documents from each test file and run each document separately.
+ It now also reports in more detail which file and document failed a test, to make it easier to track down problems.
+- Major refactoring of test files to be grouped based on what they are testing instead of an incrementing counter that does not represent anything.
+ This makes it easier to find out which keywords lack tests and which keywords have enough tests.
+
+
+1.5.2 (Nov 12, 2016)
+--------------------
+
+- Convert all documentation to readthedocs
+- True/False are no longer considered valid integers
+- python3 'bytes' objects are now valid for the string and text types
+- The regular PyYaml support is now deprecated in favor of ruamel.yaml, see the following link for more details about
+ PyYaml being deprecated https://bitbucket.org/xi/pyyaml/issues/59/has-this-project-been-abandoned
+ PyYaml will still be possible to use in the next major release version (1.6.0) but will be removed in release 1.7.0 and forward.
+- ruamel.yaml is now possible to install with the following command for local development *pip install -e '.[ruamel]'*
+ and for production, use *pip install 'pykwalify[ruamel]'*
+- ruamel.yaml is now used before PyYaml if installed on your system
+- Fixed a bug where scalar type was not validated correctly.
+- Unpinned all dependencies but still maintain a minimum version of each lib
+- Allowed mixing of regex and normal keywords when matching a string (jmacarthur)
+
+
+1.5.1 (Mar 6, 2016)
+-------------------
+
+- Improvements to documentation (scottclowe).
+- Improved code linting by reworking private variables in Rule class to now be properties and updated
+ all code that used the old way.
+- Improved code linting by reworking all Log messages to render according to pep standard.
+ (By using %s and passing in variables as positional arguments)
+- Fix bug when validating a sequence; a value is now only unicode escaped when it is a string
+- Improve validation of timestamps.
+- Improve float validation to accept strings that use scientific notation, for example "1e-06".
+- Update travis to test against python 3.6
+
+
+1.5.0 (Sep 30, 2015)
+--------------------
+
+- float / number type now support range restrictions
+- ranges on non-number types (e.g. seq, str) now need to be non-negative.
+- Fixed encoding bug triggered when both regex matching-rule 'any' and 'all' found keyword that
+ failed regex match. Added failure unit tests to cover regex matching-rule 'any' and 'all' during
+ failed regex match. Updated allowed rule list to include matching-rule 'all'.
+- Changed _validate_mappings method from using re.match to re.search. This fixes bug related to
+ multiple keyword regex using matching-rule 'any'. Added success unit tests to test default, 'any',
+ and 'all' matching-rule.
+
+
+1.4.1 (Aug 27, 2015)
+--------------------
+
+- Added tests to sdist to enable downstream packaging to run tests. No code changes in this release.
+
+
+1.4.0 (Aug 4, 2015)
+-------------------
+
+- Dropped support for python 3.2 because unicode literals do not exist in python 3.2.
+- Fixed logging & raised exceptions when using unicode characters inside schemas/data/filenames.
+- Reworked all RuleError exceptions to now have better exception messages.
+- RuleError exceptions now have a unique 'error_key' that can make it easier to identify what error it is.
+- Paths for RuleErrors have been moved inside the exception as a variable.
+- Rewrote all SchemaConflict exceptions to be more human readable.
+
+
+1.3.0 (Jul 14, 2015)
+--------------------
+
+- Rewrote most of the error messages to be more human readable. See `docs/Upgrade Instructions.md`
+ for more details.
+- It is now possible to use the exceptions that were raised for each validation error. They can be
+ found in the variable `c.validation_errors_exceptions`. They contain more detailed information
+ about the error.
+
+
+1.2.0 (May 19, 2015)
+--------------------
+
+- This feature is NEW and EXPERIMENTAL.
+ Implemented support for multiple values inside a sequence.
+ This allows the definition of different types that one sequence can contain. You can require
+ each value in the sequence to be valid against anywhere from one to all of the different possibilities.
+ Tests show that it still maintains backward compatibility with all old schemas, but it cannot be guaranteed.
+ If you find a regression in this release please file a bug report so it can be fixed ASAP.
+- This feature is NEW and EXPERIMENTAL.
+ Added ability to define python files that can be used to have custom python code/functions that can be
+ called on all types so that custom/extra validation can be done on all data structures.
+- Add new keyword 'func' that is a string and is used to point to a function loaded via the extension system.
+- Add new keyword 'extensions' that can only be used on the top level of the schema. It should be a list
+ of strings with the files that should be loaded by the extension system. Paths can be relative or absolute.
+- New cli option '-e FILE' or '--extension FILE' that can be used to load extension files from cli.
+- Fixed a bug where types did not raise exceptions properly. If schema said it should be a map but data was
+ a sequence, no validation error was raised in earlier versions but now it raises a 'NotSequenceError' or
+ 'NotMappingError'.
+
+
+1.1.0 (Apr 4, 2015)
+-------------------
+
+- Rework cli string that docopt uses. Removed redundant flags that docopt provides [--version & --help]
+- Add support for timestamp validation
+- Add new runtime dependency 'python-dateutil' that is used to validate timestamps
+- Change how 'any' keyword is implemented to now accept anything and not just the implemented types. (See Upgrade Instructions document for migration details)
+
+
+
+1.0.1 (Mar 8, 2015)
+-------------------
+
+Switched back to semantic version numbering for this lib.
+
+- After the release of `15.01` the version schema was changed back from the <year>.<month> style version schema to semantic version names. One big problem with this change is that `pypi` can't handle the change back to semantic names very well, and because of this I had to remove the old releases from pypi and replace them with a single version `1.0.1`.
+- No matter what version you were using, you should consider upgrading to `1.0.1`. The difference between the two versions is very small and contains mostly bugfixes and added improvements.
+- The old releases can still be obtained from `github.com` and if you really need the old version you can add the download url to your `requirements.txt` file.
+
+
+15.01 (Jan 17, 2015)
+--------------------
+
+- Fixed a bug in unique validation for mapping keys [See: PR-12] (Gonditeniz)
+
+
+
+14.12 (Dec 24, 2014)
+--------------------
+
+- Fixed broken regex matching on map keys.
+- Source files with file ending `.yml` can now be loaded
+- Added aliases to some directives to make it easier/faster to write
+ * `sequence` --> `seq`
+ * `mapping` --> `map`
+ * `required` --> `req`
+ * `regex` --> `re`
+- Reworked all testing files to reduce number of files
+
+
+
+14.08 (Aug 24, 2014)
+--------------------
+
+- First version to be uploaded to pypi
+- Keyword 'range' can now be applied to map & seq types.
+- Added many more test files.
+- Keyword 'length' was removed because 'range' can handle all cases now.
+- Keyword 'range' now correctly checks the internal keys to be integers
+- Major update to testing and increased coverage.
+
+
+
+14.06.1 (Jun 24, 2014)
+----------------------
+
+- New feature "partial schema". Define a small schema with an ID that can be reused at other places in the schema. See readme for details.
+- New directive "include" that is used to include a partial schema at the specified location.
+- Cli and Core() now can handle multiple schema files.
+- Directive "pattern" can no longer be used with map to validate all keys against that regex. Use "regex;" inside "mapping:"
+- 'none' can now be used as a type
+- Many more tests added
+
+
+
+14.06 (Jun 7, 2014)
+-------------------
+
+- New version scheme [YY.MM(.Minor-Release)]
+- Added TravisCI support
+- Update runtime dependency docopt to 0.6.1
+- Update runtime dependency pyyaml to 3.11
+- Huge refactoring of logging and how it works. Logging config files are now removed and everything is a lot simpler
+- Cleanup some checks that docopt now handles
+- New keyword "regex;<regex-pattern>" that can be used as a key in map to give more flexibility when validating map keys
+- New keyword "matching-rule" that can be used to control how keys should be matched
+- Added python 3.4 & python 2.7 support (See TravisCI tests for status)
+- Dropped python 3.1 support
+- A lot of refactoring of testing code.
+- Tests should now be run with "nosetests" and not "python runtests.py"
+- Refactored a lot of exceptions to be more specific (SchemaError and RuleError for example) and not a generic Exception
+- Parsed rules are now stored correctly in Core() so they can be tested from the outside
+
+
+
+0.1.2 (Jan 26, 2013)
+--------------------
+
+- Added new and experimental validation rule allowempty. (See README for more info)
+- Added TODO tracking file.
+- Reworked the CLI to now use docopt and removed argparse.
+- Implemented more typechecks: float, number, text, any
+- Now supports python 3.3.x
+- No longer supports any python 2.x.y version
+- Enabled pattern for map rule. It enables the validation of all keys in that map. (See README for more info)
+- A lot more test files; now tests the source_data and schema_data input arguments to core.py
+- A lot of cleanup in the test suite
+
+
+
+0.1.1 (Jan 21, 2013)
+--------------------
+
+- Reworked the structure of the project to be more clean and easy to find stuff.
+- lib/ folder is now removed and all contents are placed in the root of the project
+- All scripts are now moved to their own folder scripts/ (to use the scripts during development the path to the root of the project must be in your python path somehow; recommended is to create a virtualenv and export the correct path when it activates)
+- New make target 'cleanegg'
+- Fixed path bugs in Makefile
+- Fixed path bugs in Manifest
+
+
+
+0.1.0 (Jan 20, 2013)
+--------------------
+
+- Initial stable release of pyKwalify.
+- Not all functions are currently implemented, but the cli/lib can be used, probably with some bugs.
+- This should be considered an alpha release, used for bug and stability testing and as a base for further new feature requests for the next version.
+- Implemented most validation rules from the original Java version of kwalify. Some are currently not implemented and can be found via the [NYI] tag in output, doc & code.
+- Installable via pip (Not the official online pip repo but from the releases folder found in this repo)
+- Supports YAML & JSON files from cli and any dict/list data structure if used in lib mode.
+- Uses python's internal logging functionality; the default logging output can be changed by editing logging.ini (python 3.1.x) or logging.yaml (python 3.2.x), or use the -v cli input argument to change the logging level. If in lib mode it uses your implemented python std logging.
+ \ No newline at end of file
diff --git a/docs/testing.rst b/docs/testing.rst
new file mode 100644
index 0000000..bcfda4a
--- /dev/null
+++ b/docs/testing.rst
@@ -0,0 +1,20 @@
+Testing
+=======
+
+Install test/dev requirements with
+
+.. code-block:: bash
+
+ pip install -r dev-requirements.txt
+
+Run tests with
+
+.. code-block:: bash
+
+ py.test
+
+or if you want to test against all python versions and pep8
+
+.. code-block:: bash
+
+ tox
diff --git a/docs/upgrade-instructions.rst b/docs/upgrade-instructions.rst
new file mode 100644
index 0000000..36fa128
--- /dev/null
+++ b/docs/upgrade-instructions.rst
@@ -0,0 +1,74 @@
+Upgrading instructions
+======================
+
+This document describes all major changes to the validation rules and the API that could cause existing schemas to break.
+Newly added types are not described here because they will not break existing schemas.
+
+
+1.5.x --> 1.6.0
+---------------
+
+ruamel.yaml is now possible to use as a drop-in replacement for PyYaml. Install it with *pip install 'pykwalify[ruamel]'* for production use and with *pip install -e '.[ruamel]'* for development use.
+
+Several new keywords and types were added. They should bring more compatibility with the original kwalify spec, but they can also break existing schemas.
+
+SECURITY: Please note that if you are executing user provided schemas there is a security risk in using the assert keyword.
+
+Fixed several small bugs that have a high risk of causing validations to change behaviour compared to earlier versions. Many of the bugs were only found in complex schemas and data structures.
+
+A default rule can now be specified with the key ``=``, so if you have a ``=`` key in your schema it will now be considered a default rule and not a plain key.
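+
+A minimal sketch of what such a default rule can look like (an illustrative example, not taken from the project's own schemas), where every key that is not matched by any other rule must map to an integer:
+
+.. code-block:: yaml
+
+    # Schema (illustrative sketch)
+    type: map
+    mapping:
+      "=":
+        type: int
+
+.. code-block:: yaml
+
+    # Data (both keys are validated by the default rule)
+    foo: 1
+    bar: 2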
+
+New CLI flags were added. They are all optional and only introduce new (opt-in) functionality.
+
+Reworked how UTF-8 is handled. The code should now be fully compatible internally with UTF-8/unicode encodings. The docs have been updated to help if you still have errors.
+
+If the type check fails, validation will no longer continue to check any other keywords and report errors for them.
+Before, when it continued to process keywords, it would lead to errors that made no sense because those keywords were not even supposed to be available for some types.
+This can cause fewer errors to appear when running your schemas.
+
+A major change was done to the default type. It is now string so if you do not specify the type in the schema it will default back to string. This change is based on the original kwalify spec/code.
+
+Keywords ``desc``, ``example`` and ``name`` now enforce the correct value type (str) even if the values themselves have no impact on the validation.
+
+
+
+1.4.x --> 1.5.0
+---------------
+
+Regex received some fixes, so make sure your schema files are still compatible and do not produce any new errors.
+
+
+
+1.3.0 --> 1.4.0
+---------------
+
+Python 3.2 support has been dropped. It was going to be dropped when python 3.5 was released, but dropping it now made supporting python 2 & 3 at the same time easier while fixing unicode support.
+
+All logging and exception messages have been fixed to work with unicode characters in schema and data files. If you use this in lib mode then you should test your code to ensure it is still compatible.
+
+If you use ``RuleError`` in your code, you must update to use the new ``msg`` and ``error_key`` variables.
+
+If you use ``SchemaConflict`` in your code, you must update to use the new ``msg`` and ``error_key`` variables.
+
+
+
+1.2.0 --> 1.3.0
+---------------
+
+Almost all validation error messages have been updated. If you depend on the error messages located in the variable ``c.validation_errors`` you must check whether your code needs to be updated to use the new error messages.
+
+When parsing the error messages yourself, you now have access to the exceptions and more detailed variables containing the ``msg``, ``path``, ``key``, ``regex`` and ``value`` for each validation error.
+
+
+
+1.1.0 --> 1.2.0
+---------------
+
+Because of the new multiple sequence item feature all old schemas should be tested to verify that they still work as expected and no regressions have been introduced.
+
+
+
+Anything prior to 1.0.1 --> 1.1.0
+---------------------------------
+
+In release 1.1.0 the type ``any`` was changed so that it now accept anything no matter what the value is. In previous releases it was only valid if the data was any of the implemented types. The one time your schema will break is if you use ``any`` and only want one of the implemented types.
diff --git a/docs/validation-rules.rst b/docs/validation-rules.rst
new file mode 100644
index 0000000..a5becef
--- /dev/null
+++ b/docs/validation-rules.rst
@@ -0,0 +1,646 @@
+Validation rules
+================
+
+PyKwalify supports all rules implemented by the original kwalify and includes many more to extend the specification.
+
+
+
+type
+----
+
+A ``type`` specifies what rules and constraints should be applied to this node in the data structure.
+
+The following types are available:
+
+ - **any**
+ - Will always be true no matter what the value is, even unimplemented types
+
+ - **bool**
+ - Only **True**/**False** validates. Integers or strings like ``0`` or ``1``, ``"True"`` or ``"False"`` do not validate for bool
+
+ - **date**
+ - A string or datetime.date object that follows a date format
+
+ - **float**
+ - Any object that is a float type, or object that python can interpret as a float with the following python code ``float(obj)``. Scientific notation is supported for this type, for example ``1e-06``.
+
+ - **int**
+ - Validates only for integers and not floats
+
+ - **mapping** or **map**
+ - Validates only for ``dict`` objects
+
+ - **none**
+ - Validates only for ``None`` values
+
+ - **number**
+ - Validates if value is **int** or **float**
+
+ - **scalar**
+ - Validates for all but **seq** or **map**. None values will also fail validation.
+
+ - **sequence** or **seq**
+ - Validates for lists
+
+ - **str**
+ - Validates if value is a python **string** object
+
+ - **text**
+ - Validates if value is **str** or **number**
+
+ - **time**
+ - Not yet implemented [NYI]
+
+ - **timestamp**
+ - Validates for basic timestamp formats
+
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: str
+
+.. code-block:: yaml
+
+ # Data
+ 'Foobar'
+
+
+
+Mapping
+-------
+
+A mapping validates against the ``dict`` data structure.
+
+Aliases
+
+ - ``mapping``
+ - ``map``
+
+The map type is implicitly assumed when ``mapping`` or its alias ``map`` is present in the rule.
+
+.. code-block:: yaml
+
+ # Schema
+ type: map
+ mapping:
+ key_one:
+ type: str
+
+.. code-block:: yaml
+
+ # Data
+ key_one: 'bar'
+
+The schema below sets the ``mapping`` type implicitly and is also a valid schema.
+
+.. code-block:: yaml
+
+ # Schema
+ map:
+ key_one:
+ type: str
+
+
+There are some constraints which are available only for the map type, and expand its functionality.
+See the ``allowempty``, ``regex;(regex-pattern)`` and ``matching-rule`` sections below for details.
+
+By default, map keys specified in the map rule can be omitted unless they have the ``required`` constraint explicitly set to ``True``.
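+
+As a small sketch of this behaviour (an illustrative example), the data below validates even though ``key_two`` is omitted, because neither key is marked as required:
+
+.. code-block:: yaml
+
+    # Schema (illustrative sketch)
+    type: map
+    mapping:
+      key_one:
+        type: str
+      key_two:
+        type: int
+
+.. code-block:: yaml
+
+    # Data (key_two may be left out since it is not required)
+    key_one: foobar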
+
+
+
+Sequence
+--------
+
+Sequence/list of values with the given type of values.
+
+The sequence type is implicitly assumed when ``sequence`` or its alias ``seq`` is present in the rule.
+
+Aliases
+
+ - ``sequence``
+ - ``seq``
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: seq
+ sequence:
+ - type: str
+
+.. code-block:: yaml
+
+ # Data
+ - 'Foobar'
+ - 'Barfoo'
+
+The schema below sets the ``sequence`` type implicitly and is also a valid schema.
+
+.. code-block:: yaml
+
+ # Schema
+ seq:
+ - type: str
+
+Multiple list entries are supported to enable validation of different types of data inside the sequence.
+
+.. note:: The original kwalify specification only allowed one entry in the list. This has been extended in PyKwalify to give more flexibility when validating.
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: seq
+ sequence:
+ - type: str
+ - type: int
+
+.. code-block:: yaml
+
+ # Data
+ - 'Foobar'
+ - 123456
+
+Will be valid.
+
+
+
+Matching
+--------
+
+Multiple subrules can be used within the ``sequence`` block. It can also be nested to any depth, with subrules constraining list items to be sequences of sequences.
+
+The ``matching`` constraint can be used when the type is ``sequence`` to control how the parser handles a list of different subrules for the ``sequence`` block.
+
+- ``any``
+ - Each list item must satisfy at least one subrule
+- ``all``
+ - Each list item must satisfy every subrule
+- ``*``
+ - At least one list item must satisfy at least one subrule
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: seq
+ matching: "any"
+ sequence:
+ - type: str
+ - type: seq
+ sequence:
+ - type: int
+
+.. code-block:: yaml
+
+ # Data
+ - - 123
+ - "foobar"
+
+
+
+Timestamp
+---------
+
+Parse a string or integer to determine if it is a valid unix timestamp.
+
+Timestamps must be above ``1`` and below ``2147483647``.
+
+Parsing is done with `python-dateutil`_. You can see all valid formats in `the relevant dateutil documentation`_.
+
+.. _python-dateutil: https://pypi.python.org/pypi/python-dateutil
+
+.. _the relevant dateutil documentation: https://dateutil.readthedocs.org/en/latest/examples.html#parse-examples
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: map
+ mapping:
+ d1:
+ type: timestamp
+ d2:
+ type: timestamp
+
+.. code-block:: yaml
+
+ # Data
+ d1: "2015-03-29T18:45:00+00:00"
+ d2: 2147483647
+
+All ``datetime`` objects will validate as a valid timestamp.
+
+PyYaml can sometimes automatically convert data to ``datetime`` objects.
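+
+For example, an unquoted value like the one below is typically loaded by PyYaml as a ``datetime`` object before pykwalify ever sees it, and will therefore validate against the schema above (an illustrative sketch):
+
+.. code-block:: yaml
+
+    # Data (loaded as a datetime object by the yaml parser)
+    d1: 2015-03-29 18:45:00
+    d2: 2147483647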
+
+
+
+Date
+----
+
+Parse a string or datetime object to determine if it is a valid date. Date has multiple valid formats based on what standard you are using.
+
+For example, 2016-12-31 and 31-12-16 are both valid formats.
+
+If you want to parse a custom format then you can use the ``format`` keyword to specify a valid datetime parsing syntax. The valid syntax can be found at `python-strptime`_
+
+.. _python-strptime: https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
+
+Example:
+
+.. code-block:: yaml
+
+ # Schema
+ type: date
+
+.. code-block:: yaml
+
+ # Data
+ "2015-12-31"
+
+
+
+Format
+------
+
+Only valid when using the ``date`` or ``datetime`` type. It helps to define custom datetime formats if the default formats are not enough.
+
+Define the value as a string, or as a list of formats, using the builtin python datetime string formatting language. The syntax can be found at `python-strptime`_
+
+.. code-block:: yaml
+
+ # Schema
+ type: date
+ format: "%Y-%m-%d"
+
+.. code-block:: yaml
+
+ # Data
+ "2015-12-31"
+
+
+
+Required
+--------
+
+If the ``required`` constraint is set to ``True``, the key and its value must be present, otherwise a validation error will be raised.
+
+Default is ``False``.
+
+Aliases
+
+ - ``required``
+ - ``req``
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: map
+ mapping:
+ key_one:
+ type: str
+ required: True
+
+.. code-block:: yaml
+
+ # Data
+ key_one: foobar
+
+
+
+Enum
+----
+
+Set of possible elements; the value must be a member of this set.
+
+The value of enum must be a list of items.
+
+Currently only exact case matching is implemented. If you need complex validation you should use ``pattern``.
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: map
+ mapping:
+ blood:
+ type: str
+ enum: ['A', 'B', 'O', 'AB']
+
+.. code-block:: yaml
+
+ # Data
+ blood: AB
+
+
+
+Pattern
+-------
+
+Specifies a regular expression pattern which the value must satisfy.
+
+Uses `re.match`_ internally. Pattern works for all scalar types.
+
+For using regex to define possible key names in mapping, see ``regex;(regex-pattern)`` instead.
+
+.. _re.match: https://docs.python.org/3/library/re.html#re.match
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: map
+ mapping:
+ email:
+ type: str
+ pattern: .+@.+
+
+.. code-block:: yaml
+
+ # Data
+ email: foo@mail.com
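+
+Since pattern works for all scalar types, a root level scalar rule can also carry a pattern. A minimal sketch (an illustrative example reusing the same email pattern):
+
+.. code-block:: yaml
+
+    # Schema (illustrative sketch)
+    type: str
+    pattern: .+@.+
+
+.. code-block:: yaml
+
+    # Data
+    foo@mail.com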
+
+
+
+Range
+-----
+
+Range of value between
+ - ``min`` or ``max``
+ - ``min-ex`` or ``max-ex``.
+
+For numeric types (``int``, ``float`` and ``number``), the value must be within the specified range, and for non-numeric types (``map``, ``seq`` and ``str``) the length of the ``dict/list/string`` as given by ``len()`` must be within the range.
+
+For the data value (or length), ``x``, the range can be specified to test for the following:
+ - ``min`` provides an inclusive lower bound, ``a <= x``
+ - ``max`` provides an inclusive upper bound, ``x <= b``
+ - ``min-ex`` provides an exclusive lower bound, ``a < x``
+ - ``max-ex`` provides an exclusive upper bound, ``x < b``
+
+Non-numeric types require non-negative values for the boundaries, since length can not be negative.
+
+Types ``bool`` and ``any`` are not compatible with ``range``.
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: map
+ mapping:
+ password:
+ type: str
+ range:
+ min: 8
+ max: 16
+ age:
+ type: int
+ range:
+ min: 18
+ max-ex: 30
+
+.. code-block:: yaml
+
+ # Data
+ password: foobar123
+ age: 25
+
+
+
+Unique
+------
+
+If unique is set to ``True``, then the sequence cannot contain any repeated entries.
+
+The unique constraint can only be set when the type is ``seq / sequence``. It has no effect when used with ``map / mapping``.
+
+Default is ``False``.
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: seq
+ sequence:
+ - type: str
+ unique: True
+
+.. code-block:: yaml
+
+ # Data
+ - users
+ - foo
+ - admin
+
+
+
+Allowempty
+----------
+
+Only applies to ``mapping``.
+
+If ``True``, the map can have keys which are not present in the schema, and these can map to anything.
+
+Any keys which **are** specified in the schema must have values which conform to their corresponding constraints, if they are present.
+
+Default is ``False``.
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: map
+ mapping:
+ datasources:
+ type: map
+ allowempty: True
+
+.. code-block:: yaml
+
+ # Data
+ datasources:
+ test1: test1.py
+ test2: test2.py
+
+
+
+Regex;(regex-pattern)
+---------------------
+
+Only applies to ``mapping`` type.
+
+Aliases
+
+ - ``re;(regex-pattern)``
+
+This is only implemented for the ``mapping`` type, where a key inside the mapping keyword can use the ``regex;(regex-pattern)`` form, and all data keys will be matched against the pattern.
+
+Please note that the regex should be wrapped with ``( )`` and these parentheses will be removed at runtime.
+
+If a match is found then it will be parsed against the subrules on that key. A single key can be matched against multiple regex rules and the normal map rules.
+
+When defining a regex key, ``matching-rule`` should also be set to configure the behaviour when using multiple regexes.
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: map
+ matching-rule: 'any'
+ mapping:
+ regex;(mi.+):
+ type: seq
+ sequence:
+ - type: str
+ regex;(me.+):
+ type: number
+
+.. code-block:: yaml
+
+ # Data
+ mic:
+ - foo
+ - bar
+ media: 1
+
+
+
+Matching-rule
+-------------
+
+Only applies to ``mapping``. This enables more fine-grained control over how the matching rule should behave when validating regex keys inside mappings.
+
+Currently supported constraint settings are
+
+ - ``any``
+ - One or more of the regex must match.
+
+ - ``all``
+ - All defined regex must match each key.
+
+Default is ``any``.
+
+Example
+
+The following dataset will raise an error because the key ``bar2`` does not match all of the regexes.
+If the constraint was instead ``matching-rule: any``, the same data would be valid because all the keys in the data match at least one of the regex formats and associated constraints in the schema.
+
+.. code-block:: yaml
+
+ # Schema
+ type: map
+ matching-rule: all
+ mapping:
+ regex;([1-2]$):
+ type: int
+ regex;(^foobar):
+ type: int
+
+.. code-block:: yaml
+
+ # Data
+ foobar1: 1
+ foobar2: 2
+ bar2: 3
+
+
+
+Name
+----
+
+Name of the schema.
+
+This has no effect on the parsing, but is useful for humans to read.
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ name: foobar schema
+
+
+
+Nullable
+--------
+
+If the ``nullable`` constraint is set to ``False``, the value of the key must not be empty, otherwise a validation error will be raised.
+
+Default is ``True``.
+
+Aliases
+
+ - ``nullable``
+ - ``nul``
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ type: map
+ mapping:
+ key_one:
+ type: str
+ nullable: False
+
+.. code-block:: yaml
+
+ # Data
+ key_one: foobar
+
+
+Desc
+----
+
+Description of schema.
+
+This has no effect on the parsing, but is useful for humans to read. Similar to ``name``.
+
+Value for desc ``MUST`` be a string otherwise a ``RuleError`` will be raised upon usage.
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ desc: This schema is very foobar
+
+
+
+Example
+-------
+
+Write an example that shows what values are supported, or just type any comment into the schema for future reference.
+
+It is possible to use it at all levels and places in the schema; it has no effect on the parsing,
+but is useful for humans to read. Similar to ``desc``.
+
+Value for ``example`` ``MUST`` be a string otherwise a ``RuleError`` will be raised upon usage.
+
+Example
+
+.. code-block:: yaml
+
+ # Schema
+ example: List of values
+ type: seq
+ sequence:
+ - type: str
+ unique: true
+ example: Each value must be unique and a string
+
diff --git a/examples/utf8-data.yml b/examples/utf8-data.yml
new file mode 100644
index 0000000..68b7450
--- /dev/null
+++ b/examples/utf8-data.yml
@@ -0,0 +1,2 @@
+---
+name: Néron
diff --git a/examples/utf8-schema.yml b/examples/utf8-schema.yml
new file mode 100644
index 0000000..181555f
--- /dev/null
+++ b/examples/utf8-schema.yml
@@ -0,0 +1,7 @@
+---
+
+type: map
+mapping:
+ name:
+ type: str
+ pattern: N.*n
diff --git a/pykwalify/__init__.py b/pykwalify/__init__.py
new file mode 100644
index 0000000..998125f
--- /dev/null
+++ b/pykwalify/__init__.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+""" pykwalify """
+
+# python stdlib
+import logging
+import logging.config
+import os
+
+__author__ = 'Grokzen <Grokzen@gmail.com>'
+__version_info__ = (1, 7, 0)
+__version__ = '.'.join(map(str, __version_info__))
+
+
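+# Maps the cli verbosity count (capped at 5 in init_logging below) to a logging level name.
+# A count of 0 (no flags) resolves to INFO; the cli passes 1 for --quiet, which maps to CRITICAL.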
+log_level_to_string_map = {
+ 5: "DEBUG",
+ 4: "INFO",
+ 3: "WARNING",
+ 2: "ERROR",
+ 1: "CRITICAL",
+ 0: "INFO"
+}
+
+
+def init_logging(log_level):
+ """
+ Init logging settings with default set to INFO
+ """
+ log_level = log_level_to_string_map[min(log_level, 5)]
+
+ msg = "%(levelname)s - %(name)s:%(lineno)s - %(message)s" if log_level in os.environ else "%(levelname)s - %(message)s"
+
+ logging_conf = {
+ "version": 1,
+ "root": {
+ "level": log_level,
+ "handlers": ["console"]
+ },
+ "handlers": {
+ "console": {
+ "class": "logging.StreamHandler",
+ "level": log_level,
+ "formatter": "simple",
+ "stream": "ext://sys.stdout"
+ }
+ },
+ "formatters": {
+ "simple": {
+ "format": " {0}".format(msg)
+ }
+ }
+ }
+
+ logging.config.dictConfig(logging_conf)
+
+
+partial_schemas = {}
diff --git a/pykwalify/cli.py b/pykwalify/cli.py
new file mode 100644
index 0000000..e908033
--- /dev/null
+++ b/pykwalify/cli.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+
+""" pyKwalify - cli.py """
+
+# python std lib
+import logging
+import logging.config
+import sys
+
+# 3rd party imports
+from docopt import docopt
+
+
+def parse_cli():
+ """
+ The outline of this function needs to be like this:
+
+ 1. parse arguments
+ 2. validate arguments only, don't go into other logic/code
+ 3. run application logic
+ """
+
+ #
+ # 1. parse cli arguments
+ #
+
+ __docopt__ = """
+usage: pykwalify -d FILE -s FILE ... [-e FILE ...]
+ [--strict-rule-validation] [--fix-ruby-style-regex] [--allow-assertions] [-v ...] [-q]
+
+optional arguments:
+ -d FILE, --data-file FILE the file to be tested
+ -e FILE, --extension FILE file containing python extension
+ -s FILE, --schema-file FILE schema definition file
+ --fix-ruby-style-regex This flag fixes some of the quirks of ruby style regex
+ that is not compatible with python style regex
+ --strict-rule-validation enables strict validation of all keywords for all
+ Rule objects to find unsupported keyword usage
+ --allow-assertions By default assertions are disabled due to the security risk.
+ An error will be raised if assert is used in the schema
+ but this flag is not set. This option enables the assert keyword.
+ -h, --help show this help message and exit
+ -q, --quiet suppress terminal output
+ -v, --verbose verbose terminal output (multiple -v increases verbosity)
+ --version display the version number and exit
+"""
+
+ # Import pykwalify package
+ import pykwalify
+
+ args = docopt(__docopt__, version=pykwalify.__version__)
+
+ pykwalify.init_logging(1 if args["--quiet"] else args["--verbose"])
+ log = logging.getLogger(__name__)
+
+ #
+ # 2. validate arguments only, don't go into other code/logic
+ #
+
+ log.debug("Setting verbose level: %s", args["--verbose"])
+ log.debug("Arguments from CLI: %s", args)
+
+ return args
+
+
+def run(cli_args):
+ """
+ Split the functionality into 2 methods.
+
+ One for parsing the cli and one that runs the application.
+ """
+ from .core import Core
+
+ c = Core(
+ source_file=cli_args["--data-file"],
+ schema_files=cli_args["--schema-file"],
+ extensions=cli_args['--extension'],
+ strict_rule_validation=cli_args['--strict-rule-validation'],
+ fix_ruby_style_regex=cli_args['--fix-ruby-style-regex'],
+ allow_assertions=cli_args['--allow-assertions'],
+ )
+ c.validate()
+ return c
+
+
+def cli_entrypoint():
+ """
+ Main entrypoint for script. Used by setup.py to automatically
+ create a cli script
+ """
+ # Check minimum version of Python
+ if sys.version_info < (2, 7, 0):
+ sys.stderr.write(u"WARNING: pykwalify: It is recommended to run pykwalify on python version 2.7.x or later...\n\n")
+
+ run(parse_cli())
diff --git a/pykwalify/compat.py b/pykwalify/compat.py
new file mode 100644
index 0000000..fc2df0a
--- /dev/null
+++ b/pykwalify/compat.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+# python stdlib
+import sys
+import logging
+
+
+log = logging.getLogger(__name__)
+
+
+try:
+ from ruamel import yaml
+except ImportError:
+ try:
+ import yaml
+ except ImportError:
+ log.critical("Unable to import either ruamel.yaml or pyyaml")
+ sys.exit(1)
+
+log.debug("Using yaml library: {0}".format(yaml.__file__))
+
+
+if sys.version_info[0] < 3:
+ # Python 2.x.x series
+ basestring = basestring # NOQA: F821
+ unicode = unicode # NOQA: F821
+ bytes = str # NOQA: F821
+
+ def u(x):
+ """ """
+ return x.decode()
+
+ def b(x):
+ """ """
+ return x
+
+ def nativestr(x):
+ """ """
+ return x if isinstance(x, str) else x.encode('utf-8', 'replace')
+else:
+ # Python 3.x.x series
+ basestring = str # NOQA: F821
+ unicode = str # NOQA: F821
+ bytes = bytes # NOQA: F821
+
+ def u(x):
+ """ """
+ return x
+
+ def b(x):
+ """ """
+ return x.encode('latin-1') if not isinstance(x, bytes) else x
+
+ def nativestr(x):
+ """ """
+ return x if isinstance(x, str) else x.decode('utf-8', 'replace')
diff --git a/pykwalify/core.py b/pykwalify/core.py
new file mode 100644
index 0000000..e94ac80
--- /dev/null
+++ b/pykwalify/core.py
@@ -0,0 +1,978 @@
+# -*- coding: utf-8 -*-
+
+""" pyKwalify - core.py """
+
+# python std lib
+import datetime
+import imp
+import json
+import logging
+import os
+import re
+import sys
+import traceback
+import time
+
+# pyKwalify imports
+import pykwalify
+from pykwalify.compat import unicode, nativestr, basestring
+from pykwalify.errors import CoreError, SchemaError, NotMappingError, NotSequenceError
+from pykwalify.rule import Rule
+from pykwalify.types import is_scalar, is_string, tt
+
+# 3rd party imports
+from pykwalify.compat import yaml
+from dateutil.parser import parse
+
+log = logging.getLogger(__name__)
+
+
+class Core(object):
+ """ Core class of pyKwalify """
+
+ def __init__(self, source_file=None, schema_files=None, source_data=None, schema_data=None, extensions=None, strict_rule_validation=False,
+ fix_ruby_style_regex=False, allow_assertions=False,):
+ """
+ :param extensions:
+ List of paths to python files that should be imported and available via the 'func' keyword.
+ This list of extensions can be set manually or they should be provided by the `--extension`
+ flag from the cli. This list should not contain files specified by the `extensions` list keyword
+ that can be defined at the top level of the schema.
+ """
+ if schema_files is None:
+ schema_files = []
+ if extensions is None:
+ extensions = []
+
+ log.debug(u"source_file: %s", source_file)
+ log.debug(u"schema_file: %s", schema_files)
+ log.debug(u"source_data: %s", source_data)
+ log.debug(u"schema_data: %s", schema_data)
+ log.debug(u"extension files: %s", extensions)
+
+ self.source = None
+ self.schema = None
+ self.validation_errors = None
+ self.validation_errors_exceptions = None
+ self.root_rule = None
+ self.extensions = extensions
+ self.errors = []
+ self.strict_rule_validation = strict_rule_validation
+ self.fix_ruby_style_regex = fix_ruby_style_regex
+ self.allow_assertions = allow_assertions
+
+ if source_file is not None:
+ if not os.path.exists(source_file):
+ raise CoreError(u"Provided source_file do not exists on disk: {0}".format(source_file))
+
+ with open(source_file, "r") as stream:
+ if source_file.endswith(".json"):
+ self.source = json.load(stream)
+ elif source_file.endswith(".yaml") or source_file.endswith('.yml'):
+ self.source = yaml.safe_load(stream)
+ else:
+ raise CoreError(u"Unable to load source_file. Unknown file format of specified file path: {0}".format(source_file))
+
+ if not isinstance(schema_files, list):
+ raise CoreError(u"schema_files must be of list type")
+
+ # Merge all schema files into one single file for easy parsing
+ if len(schema_files) > 0:
+ schema_data = {}
+ for f in schema_files:
+ if not os.path.exists(f):
+ raise CoreError(u"Provided source_file do not exists on disk : {0}".format(f))
+
+ with open(f, "r") as stream:
+ if f.endswith(".json"):
+ data = json.load(stream)
+ elif f.endswith(".yaml") or f.endswith(".yml"):
+ data = yaml.safe_load(stream)
+ if not data:
+ raise CoreError(u"No data loaded from file : {0}".format(f))
+ else:
+ raise CoreError(u"Unable to load file : {0} : Unknown file format. Supported file endings is [.json, .yaml, .yml]")
+
+ for key in data.keys():
+ if key in schema_data.keys():
+ raise CoreError(u"Parsed key : {0} : two times in schema files...".format(key))
+
+ schema_data = dict(schema_data, **data)
+
+ self.schema = schema_data
+
+ # Nothing was loaded so try the source_data variable
+ if self.source is None:
+ log.debug(u"No source file loaded, trying source data variable")
+ self.source = source_data
+ if self.schema is None:
+ log.debug(u"No schema file loaded, trying schema data variable")
+ self.schema = schema_data
+
+ # Test if anything was loaded
+ if self.source is None:
+ raise CoreError(u"No source file/data was loaded")
+ if self.schema is None:
+ raise CoreError(u"No schema file/data was loaded")
+
+ # Merge any extensions defined in the schema with the provided list of extensions from the cli
+ for f in self.schema.get('extensions', []):
+ self.extensions.append(f)
+
+ if not isinstance(self.extensions, list) or not all(isinstance(e, str) for e in self.extensions):
+ raise CoreError(u"Specified extensions must be a list of file paths")
+
+ self._load_extensions()
+
+ if self.strict_rule_validation:
+ log.info("Using strict rule keywords validation...")
+
+ def _load_extensions(self):
+ """
+ Load all extension files into the namespace pykwalify.ext
+ """
+ log.debug(u"loading all extensions : %s", self.extensions)
+
+ self.loaded_extensions = []
+
+ for f in self.extensions:
+ if not os.path.isabs(f):
+ f = os.path.abspath(f)
+
+ if not os.path.exists(f):
+ raise CoreError(u"Extension file: {0} not found on disk".format(f))
+
+ self.loaded_extensions.append(imp.load_source("", f))
+
+ log.debug(self.loaded_extensions)
+ log.debug([dir(m) for m in self.loaded_extensions])
+
+ def validate(self, raise_exception=True):
+ """
+ """
+ log.debug(u"starting core")
+
+ self._start_validate(self.source)
+ self.validation_errors = [unicode(error) for error in self.errors]
+ self.validation_errors_exceptions = self.errors
+
+ if self.errors is None or len(self.errors) == 0:
+ log.info(u"validation.valid")
+ else:
+ log.error(u"validation.invalid")
+ log.error(u" --- All found errors ---")
+ log.error(self.validation_errors)
+ if raise_exception:
+ raise SchemaError(u"Schema validation failed:\n - {error_msg}.".format(
+ error_msg=u'.\n - '.join(self.validation_errors)))
+ else:
+ log.error(u"Errors found but will not raise exception...")
+
+ # Return validated data
+ return self.source
+
+ def _start_validate(self, value=None):
+ """
+ """
+ path = ""
+ self.errors = []
+ done = []
+
+ s = {}
+
+ # Look for schema; tags so they can be parsed before the root rule is parsed
+ for k, v in self.schema.items():
+ if k.startswith("schema;"):
+ log.debug(u"Found partial schema; : %s", v)
+ r = Rule(schema=v)
+ log.debug(u" Partial schema : %s", r)
+ pykwalify.partial_schemas[k.split(";", 1)[1]] = r
+ else:
+ # re-add all items that are not schema; entries so they can be parsed
+ s[k] = v
+
+ self.schema = s
+
+ log.debug(u"Building root rule object")
+ root_rule = Rule(schema=self.schema)
+ self.root_rule = root_rule
+ log.debug(u"Done building root rule")
+ log.debug(u"Root rule: %s", self.root_rule)
+
+ self._validate(value, root_rule, path, done)
+
+ def _validate(self, value, rule, path, done):
+ """
+ """
+ log.debug(u"Core validate")
+ log.debug(u" Root validate : Rule: %s", rule)
+ log.debug(u" Root validate : Rule_type: %s", rule.type)
+ log.debug(u" Root validate : Seq: %s", rule.sequence)
+ log.debug(u" Root validate : Map: %s", rule.mapping)
+ log.debug(u" Root validate : Done: %s", done)
+
+ if rule.required and value is None and not rule.type == 'none':
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"required.novalue : '{path}'",
+ path=path,
+ value=value.encode('unicode_escape') if value else value,
+ ))
+ return
+
+ if not rule.nullable and value is None and not rule.type == 'none':
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"nullable.novalue : '{path}'",
+ path=path,
+ value=value.encode('unicode_escape') if value else value,
+ ))
+ return
+
+ log.debug(u" ? ValidateRule: %s", rule)
+ if rule.include_name is not None:
+ self._validate_include(value, rule, path, done=None)
+ elif rule.sequence is not None:
+ self._validate_sequence(value, rule, path, done=None)
+ elif rule.mapping is not None or rule.allowempty_map:
+ self._validate_mapping(value, rule, path, done=None)
+ else:
+ self._validate_scalar(value, rule, path, done=None)
+
+ def _handle_func(self, value, rule, path, done=None):
+ """
+ Helper function that should check if func is specified for this rule and
+ then handle it for all cases in a generic way.
+ """
+ func = rule.func
+
+ # func keyword is not defined so nothing to do
+ if not func:
+ return
+
+ found_method = False
+
+ for extension in self.loaded_extensions:
+ method = getattr(extension, func, None)
+ if method:
+ found_method = True
+
+ # No exception should be caught here. If one is raised it should bubble up all the way.
+ ret = method(value, rule, path)
+
+ # If False or None or some other object that is interpreted as False
+ if not ret:
+ raise CoreError(u"Error when running extension function : {0}".format(func))
+
+ # Only run the first matched function. Since loading order is deterministic
+ # it should be easy to determine which file is used before others
+ break
+
+ if not found_method:
+ raise CoreError(u"Did not find method '{0}' in any loaded extension file".format(func))
+
+ def _validate_include(self, value, rule, path, done=None):
+ """
+ """
+ # TODO: It is difficult to get a good test case to trigger this if case
+ if rule.include_name is None:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u'Include name not valid',
+ path=path,
+ value=value.encode('unicode_escape')))
+ return
+ include_name = rule.include_name
+ partial_schema_rule = pykwalify.partial_schemas.get(include_name)
+ if not partial_schema_rule:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Cannot find partial schema with name '{include_name}'. Existing partial schemas: '{existing_schemas}'. Path: '{path}'",
+ path=path,
+ value=value,
+ include_name=include_name,
+ existing_schemas=", ".join(sorted(pykwalify.partial_schemas.keys()))))
+ return
+
+ self._validate(value, partial_schema_rule, path, done)
+
+ def _validate_sequence(self, value, rule, path, done=None):
+ """
+ """
+ log.debug(u"Core Validate sequence")
+ log.debug(u" Sequence : Data: %s", value)
+ log.debug(u" Sequence : Rule: %s", rule)
+ log.debug(u" Sequence : RuleType: %s", rule.type)
+ log.debug(u" Sequence : Path: %s", path)
+ log.debug(u" Sequence : Seq: %s", rule.sequence)
+ log.debug(u" Sequence : Map: %s", rule.mapping)
+
+ if len(rule.sequence) <= 0:
+ raise CoreError(u"Sequence must contains atleast one item : {0}".format(path))
+
+ if value is None:
+ log.debug(u" * Core seq: sequence data is None")
+ return
+
+ if not isinstance(value, list):
+ if isinstance(value, str):
+ value = value.encode('unicode_escape')
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ u"Value '{value}' is not a list. Value path: '{path}'",
+ path,
+ value,
+ ))
+ return
+
+ # Handle 'func' argument on this sequence
+ self._handle_func(value, rule, path, done)
+
+ ok_values = []
+ error_tracker = []
+
+ unique_errors = {}
+ map_unique_errors = {}
+
+ for i, item in enumerate(value):
+ processed = []
+
+ for r in rule.sequence:
+ tmp_errors = []
+
+ try:
+ # Create a sub core object to enable error tracking that does not
+ # collide with this Core object's errors
+ tmp_core = Core(source_data={}, schema_data={})
+ tmp_core.fix_ruby_style_regex = self.fix_ruby_style_regex
+ tmp_core.allow_assertions = self.allow_assertions
+ tmp_core.strict_rule_validation = self.strict_rule_validation
+ tmp_core.loaded_extensions = self.loaded_extensions
+ tmp_core._validate(item, r, "{0}/{1}".format(path, i), done)
+ tmp_errors = tmp_core.errors
+ except NotMappingError:
+ # For example: If one type was specified as 'map' but data
+ # was 'str' an exception will be thrown but we should ignore it
+ pass
+ except NotSequenceError:
+ # For example: If one type was specified as 'seq' but data
+ # was 'str' an exception will be thrown but we should ignore it
+ pass
+
+ processed.append(tmp_errors)
+
+ if r.type == "map":
+ log.debug(u" * Found map inside sequence")
+ unique_keys = []
+
+ if r.mapping is None:
+ log.debug(u" + No rule to apply, prolly because of allowempty: True")
+ return
+
+ for k, _rule in r.mapping.items():
+ log.debug(u" * Key: %s", k)
+ log.debug(u" * Rule: %s", _rule)
+
+ if _rule.unique or _rule.ident:
+ unique_keys.append(k)
+
+ if len(unique_keys) > 0:
+ for v in unique_keys:
+ table = {}
+ for j, V in enumerate(value):
+ # If the key does not exist it is ignored by unique because that is not a broken constraint
+ val = V.get(v, None)
+
+ if val is None:
+ continue
+
+ if val in table:
+ curr_path = "{0}/{1}/{2}".format(path, j, v)
+ prev_path = "{0}/{1}/{2}".format(path, table[val], v)
+ s = SchemaError.SchemaErrorEntry(
+ msg=u"Value '{duplicate}' is not unique. Previous path: '{prev_path}'. Path: '{path}'",
+ path=curr_path,
+ value=value,
+ duplicate=val,
+ prev_path=prev_path,
+ )
+ map_unique_errors[s.__repr__()] = s
+ else:
+ table[val] = j
+ elif r.unique:
+ log.debug(u" * Found unique value in sequence")
+ table = {}
+
+ for j, val in enumerate(value):
+ if val is None:
+ continue
+
+ if val in table:
+ curr_path = "{0}/{1}".format(path, j)
+ prev_path = "{0}/{1}".format(path, table[val])
+ s = SchemaError.SchemaErrorEntry(
+ msg=u"Value '{duplicate}' is not unique. Previous path: '{prev_path}'. Path: '{path}'",
+ path=curr_path,
+ value=value,
+ duplicate=val,
+ prev_path=prev_path,
+ )
+ unique_errors[s.__repr__()] = s
+ else:
+ table[val] = j
+
+ error_tracker.append(processed)
+ no_errors = []
+ for _errors in processed:
+ no_errors.append(len(_errors) == 0)
+
+ if rule.matching == "any":
+ log.debug(u" * any rule %s", True in no_errors)
+ ok_values.append(True in no_errors)
+ elif rule.matching == "all":
+ log.debug(u" * all rule".format(all(no_errors)))
+ ok_values.append(all(no_errors))
+ elif rule.matching == "*":
+ log.debug(u" * star rule", "...")
+ ok_values.append(True)
+
+ for _error in unique_errors:
+ self.errors.append(_error)
+
+ for _error in map_unique_errors:
+ self.errors.append(_error)
+
+ log.debug(u" * ok : %s", ok_values)
+
+ # All values must pass the validation, otherwise add the parsed errors
+ # to the global error list and throw up some error.
+ if not all(ok_values):
+ # Ignore checking for '*' type because it should always go through
+ if rule.matching == "any":
+ log.debug(u" * Value: %s did not validate against one or more sequence schemas", value)
+ elif rule.matching == "all":
+ log.debug(u" * Value: %s did not validate against all possible sequence schemas", value)
+
+ for i, is_ok in enumerate(ok_values):
+ if not is_ok:
+ for error in error_tracker[i]:
+ for e in error:
+ self.errors.append(e)
+
+ log.debug(u" * Core seq: validation recursivley done...")
+
+ if rule.range is not None:
+ rr = rule.range
+
+ self._validate_range(
+ rr.get("max"),
+ rr.get("min"),
+ rr.get("max-ex"),
+ rr.get("min-ex"),
+ len(value),
+ path,
+ "seq",
+ )
+
+ def _validate_mapping(self, value, rule, path, done=None):
+ """
+ """
+ log.debug(u"Validate mapping")
+ log.debug(u" Mapping : Data: %s", value)
+ log.debug(u" Mapping : Rule: %s", rule)
+ log.debug(u" Mapping : RuleType: %s", rule.type)
+ log.debug(u" Mapping : Path: %s", path)
+ log.debug(u" Mapping : Seq: %s", rule.sequence)
+ log.debug(u" Mapping : Map: %s", rule.mapping)
+
+ if not isinstance(value, dict):
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ u"Value '{value}' is not a dict. Value path: '{path}'",
+ path,
+ value,
+ ))
+ return
+
+ if rule.mapping is None:
+ log.debug(u" + No rule to apply, prolly because of allowempty: True")
+ return
+
+ # Handle 'func' argument on this mapping
+ self._handle_func(value, rule, path, done)
+
+ m = rule.mapping
+ log.debug(u" Mapping: Rule-Mapping: %s", m)
+
+ if rule.range is not None:
+ r = rule.range
+
+ self._validate_range(
+ r.get("max"),
+ r.get("min"),
+ r.get("max-ex"),
+ r.get("min-ex"),
+ len(value),
+ path,
+ "map",
+ )
+
+ for k, rr in m.items():
+ # Handle if the value of the key contains a include keyword
+ if rr.include_name is not None:
+ include_name = rr.include_name
+ partial_schema_rule = pykwalify.partial_schemas.get(include_name)
+
+ if not partial_schema_rule:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Cannot find partial schema with name '{include_name}'. Existing partial schemas: '{existing_schemas}'. Path: '{path}'",
+ path=path,
+ value=value,
+ include_name=include_name,
+ existing_schemas=", ".join(sorted(pykwalify.partial_schemas.keys()))))
+ return
+
+ rr = partial_schema_rule
+
+ # Find out if this is a regex rule
+ is_regex_rule = False
+ required_regex = ""
+ for regex_rule in rule.regex_mappings:
+ if k == "regex;({})".format(regex_rule.map_regex_rule) or k == "re;({})".format(regex_rule.map_regex_rule):
+ is_regex_rule = True
+ required_regex = regex_rule.map_regex_rule
+
+ # Check for the presence of the required key
+ is_present = False
+ if not is_regex_rule:
+ is_present = k in value
+ else:
+ is_present = any([re.search(required_regex, v) for v in value])
+
+ # Specifying =: as key is considered the "default" if no other keys match
+ if rr.required and not is_present and k != "=":
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Cannot find required key '{key}'. Path: '{path}'",
+ path=path,
+ value=value,
+ key=k))
+ if k not in value and rr.default is not None:
+ value[k] = rr.default
+
+ for k, v in value.items():
+ # If no other case was a match, check if a default mapping is valid/present and use
+ # that one instead
+ r = m.get(k, m.get('='))
+ log.debug(u" Mapping-value : %s", m)
+ log.debug(u" Mapping-value : %s %s", k, v)
+ log.debug(u" Mapping-value : %s", r)
+
+ regex_mappings = [(regex_rule, re.search(regex_rule.map_regex_rule, str(k))) for regex_rule in rule.regex_mappings]
+ log.debug(u" Mapping-value: Mapping Regex matches: %s", regex_mappings)
+
+ if r is not None:
+ # validate recursively
+ log.debug(u" Mapping-value: Core Map: validate recursively: %s", r)
+ self._validate(v, r, u"{0}/{1}".format(path, k), done)
+ elif any(regex_mappings):
+ sub_regex_result = []
+
+ # Found at least one that matches a mapping regex
+ for mm in regex_mappings:
+ if mm[1]:
+ log.debug(u" Mapping-value: Matching regex patter: %s", mm[0])
+ self._validate(v, mm[0], "{0}/{1}".format(path, k), done)
+ sub_regex_result.append(True)
+ else:
+ sub_regex_result.append(False)
+
+ if rule.matching_rule == "any":
+ if any(sub_regex_result):
+ log.debug(u" Mapping-value: Matched at least one regex")
+ else:
+ log.debug(u" Mapping-value: No regex matched")
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Key '{key}' does not match any regex '{regex}'. Path: '{path}'",
+ path=path,
+ value=value,
+ key=k,
+ regex="' or '".join(sorted([mm[0].map_regex_rule for mm in regex_mappings]))))
+ elif rule.matching_rule == "all":
+ if all(sub_regex_result):
+ log.debug(u" Mapping-value: Matched all regex rules")
+ else:
+ log.debug(u" Mapping-value: Did not match all regex rules")
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Key '{key}' does not match all regex '{regex}'. Path: '{path}'",
+ path=path,
+ value=value,
+ key=k,
+ regex="' and '".join(sorted([mm[0].map_regex_rule for mm in regex_mappings]))))
+ else:
+ log.debug(u" Mapping-value: No mapping rule defined")
+ else:
+ if not rule.allowempty_map:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Key '{key}' was not defined. Path: '{path}'",
+ path=path,
+ value=value,
+ key=k))
+
+ def _validate_scalar(self, value, rule, path, done=None):
+ """
+ """
+ log.debug(u"Validate scalar")
+ log.debug(u" Scalar : Value : %s", value)
+ log.debug(u" Scalar : Rule : %s", rule)
+ log.debug(u" Scalar : RuleType : %s", rule.type)
+ log.debug(u" Scalar : Path %s", path)
+
+ # Handle 'func' argument on this scalar
+ self._handle_func(value, rule, path, done)
+
+ if rule.assertion is not None:
+ self._validate_assert(rule, value, path)
+
+ if value is None:
+ return True
+
+ if rule.enum is not None and value not in rule.enum:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Enum '{value}' does not exist. Path: '{path}'",
+ path=path,
+ value=nativestr(value) if tt['str'](value) else value,
+ ))
+
+ # Set default value
+ if rule.default and value is None:
+ value = rule.default
+
+ if not self._validate_scalar_type(value, rule.type, path):
+ return
+
+ if value is None:
+ return
+
+ if rule.pattern is not None:
+ #
+            # Try to trim away the surrounding slashes of a ruby style /<regex>/ pattern if they are present.
+            # This is a Ruby quirk: regex patterns are written with surrounding slashes.
+            # Docs on how Ruby regex works can be found here: https://ruby-doc.org/core-2.4.0/Regexp.html
+            # The original Ruby implementation uses this code to validate patterns:
+            #     unless value.to_s =~ rule.regexp
+            # Because Python regexes do not use surrounding slashes, we have to trim them away for the regex to work.
+ #
+ if rule.pattern.startswith('/') and rule.pattern.endswith('/') and self.fix_ruby_style_regex:
+ rule.pattern = rule.pattern[1:-1]
+ log.debug("Trimming slashes around ruby style regex. New pattern value: '{0}'".format(rule.pattern))
+
+ try:
+                log.debug("Matching value '{0}' against pattern '{1}'".format(value, rule.pattern))
+ res = re.match(rule.pattern, value, re.UNICODE)
+ except TypeError:
+ res = None
+
+ if res is None: # Not matching
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Value '{value}' does not match pattern '{pattern}'. Path: '{path}'",
+ path=path,
+ value=nativestr(str(value)),
+ pattern=rule._pattern))
+ else:
+ log.debug("Pattern matched...")
+
+ if rule.range is not None:
+ if not is_scalar(value):
+ raise CoreError(u"value is not a valid scalar")
+
+ r = rule.range
+
+ try:
+ v = len(value)
+ value = v
+ except Exception:
+ pass
+
+ self._validate_range(
+ r.get("max"),
+ r.get("min"),
+ r.get("max-ex"),
+ r.get("min-ex"),
+ value,
+ path,
+ "scalar",
+ )
+
+ if rule.length is not None:
+ self._validate_length(
+ rule.length,
+ value,
+ path,
+ 'scalar',
+ )
+
+ # Validate timestamp
+ if rule.type == "timestamp":
+ self._validate_scalar_timestamp(value, path)
+
+ if rule.type == "date":
+ if not is_scalar(value):
+ raise CoreError(u'value is not a valid scalar')
+ date_format = rule.format
+ self._validate_scalar_date(value, date_format, path)
+
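
As a note on the ruby-style pattern handling in _validate_scalar above, the slash trimming amounts to the minimal sketch below; fix_ruby_style_regex is modelled on the flag used by the validator, and the sample patterns are invented.

import re

def compile_schema_pattern(pattern, fix_ruby_style_regex=True):
    # Ruby writes patterns as /<regex>/; Python's re module expects the bare
    # regex, so the surrounding slashes are stripped before compiling.
    if fix_ruby_style_regex and pattern.startswith('/') and pattern.endswith('/'):
        pattern = pattern[1:-1]
    return re.compile(pattern, re.UNICODE)

assert compile_schema_pattern('/foo.*/').match('foobar')
assert compile_schema_pattern('foo.*', fix_ruby_style_regex=False).match('foobar')
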
+ def _validate_scalar_timestamp(self, timestamp_value, path):
+ """
+ """
+ def _check_int_timestamp_boundaries(timestamp):
+ """
+ """
+ if timestamp < 1:
+ # Timestamp integers can't be negative
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Integer value of timestamp can't be below 0",
+ path=path,
+ value=timestamp,
+ timestamp=str(timestamp),
+ ))
+ if timestamp > 2147483647:
+ # Timestamp integers can't be above the upper limit of
+ # 32 bit integers
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Integer value of timestamp can't be above 2147483647",
+ path=path,
+ value=timestamp,
+ timestamp=str(timestamp),
+ ))
+
+ if isinstance(timestamp_value, (int, float)):
+ _check_int_timestamp_boundaries(timestamp_value)
+ elif isinstance(timestamp_value, datetime.datetime):
+ # Datetime objects currently have nothing to validate.
+ # In the future, more options will be added to datetime validation
+ pass
+ elif isinstance(timestamp_value, basestring):
+ v = timestamp_value.strip()
+
+ # parse("") will give a valid date but it should not be
+ # considered a valid timestamp
+ if v == "":
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Timestamp value is empty. Path: '{path}'",
+ path=path,
+ value=nativestr(timestamp_value),
+ timestamp=nativestr(timestamp_value)))
+ else:
+                # A string can contain a valid unix timestamp integer. Check if it is valid and validate it
+ try:
+ int_v = int(v)
+ _check_int_timestamp_boundaries(int_v)
+ except ValueError:
+ # Just continue to parse it as a timestamp
+ try:
+ parse(timestamp_value)
+ # If it can be parsed then it is valid
+ except Exception:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+                            msg=u"Timestamp: '{timestamp}' is invalid. Path: '{path}'",
+ path=path,
+ value=nativestr(timestamp_value),
+ timestamp=nativestr(timestamp_value)))
+ else:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Not a valid timestamp",
+ path=path,
+ value=timestamp_value,
+ timestamp=timestamp_value,
+ ))
+
+ def _validate_scalar_date(self, date_value, date_formats, path):
+ log.debug(u"Validate date : %(value)s : %(format)s : %(path)s" % {
+ 'value': date_value,
+ 'format': date_formats,
+ 'path': path,
+ })
+
+ if isinstance(date_value, str):
+            # If a date_format is specified then use strptime on all formats
+            # If no date_format is specified then use dateutil's parse() to test the value
+ log.debug(date_formats)
+
+ if date_formats:
+                # Run through all date_formats; the value is valid if at least one of them passes time.strptime() parsing
+ valid = False
+ for date_format in date_formats:
+ try:
+ time.strptime(date_value, date_format)
+ valid = True
+ except ValueError:
+ pass
+
+ if not valid:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Not a valid date: {value} format: {format}. Path: '{path}'",
+ path=path,
+ value=date_value,
+ format=date_format,
+ ))
+ return
+ else:
+ try:
+ parse(date_value)
+ except ValueError:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Not a valid date: {value} Path: '{path}'",
+ path=path,
+ value=date_value,
+ ))
+ elif isinstance(date_value, (datetime.date, datetime.datetime)):
+ # If the object already is a datetime or date object it passes validation
+ pass
+ else:
+ # If value is any other type then raise error
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Not a valid date: {value} date must be a string or a datetime.date not a '{type}'",
+ path=path,
+ value=date_value,
+ type=type(date_value).__name__,
+ ))
+
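
To make the format handling in _validate_scalar_date concrete: a value passes as soon as one of the configured strptime formats parses it. The format strings below are only examples.

import time

def matches_any_format(date_value, date_formats):
    # Accept the value as soon as one format parses without a ValueError.
    for date_format in date_formats:
        try:
            time.strptime(date_value, date_format)
            return True
        except ValueError:
            continue
    return False

print(matches_any_format("2015-03-29", ["%Y-%m-%d", "%d/%m/%Y"]))  # True
print(matches_any_format("29 Mar 2015", ["%Y-%m-%d"]))             # False
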
+ def _validate_length(self, rule, value, path, prefix):
+ if not is_string(value):
+ raise CoreError("Value: '{0}' must be a 'str' type for length check to work".format(value))
+
+ value_length = len(str(value))
+ max_, min_, max_ex, min_ex = rule.get('max'), rule.get('min'), rule.get('max-ex'), rule.get('min-ex')
+
+ log.debug(
+ u"Validate length : %s : %s : %s : %s : %s : %s",
+            max_, min_, max_ex, min_ex, value, path,
+ )
+
+ if max_ is not None and max_ < value_length:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Value: '{value_str}' has length of '{value}', greater than max limit '{max_}'. Path: '{path}'",
+ value_str=value,
+ path=path,
+ value=len(value),
+ prefix=prefix,
+ max_=max_))
+
+ if min_ is not None and min_ > value_length:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+                msg=u"Value: '{value_str}' has length of '{value}', less than min limit '{min_}'. Path: '{path}'",
+ value_str=value,
+ path=path,
+ value=len(value),
+ prefix=prefix,
+ min_=min_))
+
+ if max_ex is not None and max_ex <= value_length:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+                msg=u"Value: '{value_str}' has length of '{value}', greater than or equal to max-ex limit '{max_ex}'. Path: '{path}'",
+ value_str=value,
+ path=path,
+ value=len(value),
+ prefix=prefix,
+ max_ex=max_ex))
+
+ if min_ex is not None and min_ex >= value_length:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+                msg=u"Value: '{value_str}' has length of '{value}', less than or equal to min-ex limit '{min_ex}'. Path: '{path}'",
+ value_str=value,
+ path=path,
+ value=len(value),
+ prefix=prefix,
+ min_ex=min_ex))
+
+ def _validate_assert(self, rule, value, path):
+ if not self.allow_assertions:
+            raise CoreError('To allow usage of keyword "assert" you must use cli flag "--allow-assertions" or set the keyword "allow_assertions" in Core class')
+
+ # Small hack to make strings work as a value.
+ if isinstance(value, str):
+ assert_value_str = '"{0}"'.format(value)
+ else:
+ assert_value_str = '{0}'.format(value)
+
+ assertion_string = "val = {0}; assert {1}".format(assert_value_str, rule.assertion)
+ try:
+ exec(assertion_string, {}, {})
+ except AssertionError:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Value: '{0}' assertion expression failed ({1})".format(value, rule.assertion),
+ path=path,
+ value=value,
+ ))
+ return
+ except Exception as err:
+ error_class = err.__class__.__name__
+ detail = err.args[0]
+ cl, exc, tb = sys.exc_info()
+ line_number = traceback.extract_tb(tb)[-1][1]
+ raise Exception("Unknown error during assertion\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}".format(
+ error_class, detail, cl, exc, tb, line_number,
+ ))
+
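
The assert keyword handled above binds the scalar to val and executes a plain assert statement; the isolated sketch below mirrors that idea. The assertion expressions are invented examples.

def check_assertion(value, rule_assertion):
    # Strings need quoting so they survive being pasted into the expression.
    if isinstance(value, str):
        assert_value_str = '"{0}"'.format(value)
    else:
        assert_value_str = '{0}'.format(value)

    assertion_string = "val = {0}; assert {1}".format(assert_value_str, rule_assertion)
    try:
        exec(assertion_string, {}, {})
        return True
    except AssertionError:
        return False

print(check_assertion(5, "3 < val and val < 8"))    # True
print(check_assertion(2.9, "3 < val and val < 8"))  # False
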
+ def _validate_range(self, max_, min_, max_ex, min_ex, value, path, prefix):
+ """
+ Validate that value is within range values.
+ """
+ if not isinstance(value, int) and not isinstance(value, float):
+            raise CoreError("Value must be an integer or float type")
+
+ log.debug(
+ u"Validate range : %s : %s : %s : %s : %s : %s",
+ max_,
+ min_,
+ max_ex,
+ min_ex,
+ value,
+ path,
+ )
+
+ if max_ is not None and max_ < value:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Type '{prefix}' has size of '{value}', greater than max limit '{max_}'. Path: '{path}'",
+ path=path,
+ value=nativestr(value) if tt['str'](value) else value,
+ prefix=prefix,
+ max_=max_))
+
+ if min_ is not None and min_ > value:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Type '{prefix}' has size of '{value}', less than min limit '{min_}'. Path: '{path}'",
+ path=path,
+ value=nativestr(value) if tt['str'](value) else value,
+ prefix=prefix,
+ min_=min_))
+
+ if max_ex is not None and max_ex <= value:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Type '{prefix}' has size of '{value}', greater than or equals to max limit(exclusive) '{max_ex}'. Path: '{path}'",
+ path=path,
+ value=nativestr(value) if tt['str'](value) else value,
+ prefix=prefix,
+ max_ex=max_ex))
+
+ if min_ex is not None and min_ex >= value:
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Type '{prefix}' has size of '{value}', less than or equals to min limit(exclusive) '{min_ex}'. Path: '{path}'",
+ path=path,
+ value=nativestr(value) if tt['str'](value) else value,
+ prefix=prefix,
+ min_ex=min_ex))
+
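
The four range boundaries checked above interact as in this minimal sketch, which returns human-readable violations instead of appending SchemaErrorEntry objects; it is illustrative only.

def range_violations(value, max_=None, min_=None, max_ex=None, min_ex=None):
    # max/min are inclusive bounds, max-ex/min-ex are exclusive bounds.
    violations = []
    if max_ is not None and value > max_:
        violations.append("above max {0}".format(max_))
    if min_ is not None and value < min_:
        violations.append("below min {0}".format(min_))
    if max_ex is not None and value >= max_ex:
        violations.append("not below max-ex {0}".format(max_ex))
    if min_ex is not None and value <= min_ex:
        violations.append("not above min-ex {0}".format(min_ex))
    return violations

print(range_violations(10, max_=5))          # ['above max 5']
print(range_violations(5, min_ex=5))         # ['not above min-ex 5']
print(range_violations(4, min_=1, max_=10))  # []
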
+ def _validate_scalar_type(self, value, t, path):
+ """
+ """
+ log.debug(u" # Core scalar: validating scalar type : %s", t)
+ log.debug(u" # Core scalar: scalar type: %s", type(value))
+
+ try:
+ if not tt[t](value):
+ self.errors.append(SchemaError.SchemaErrorEntry(
+ msg=u"Value '{value}' is not of type '{scalar_type}'. Path: '{path}'",
+ path=path,
+ value=unicode(value) if tt['str'](value) else value,
+ scalar_type=t))
+ return False
+ return True
+ except KeyError as e:
+ # Type not found in valid types mapping
+ log.debug(e)
+ raise CoreError(u"Unknown type check: {0!s} : {1!s} : {2!s}".format(path, value, t))
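
For orientation, this is roughly how the validator in this file is driven end to end. It assumes the Core constructor accepts source_data/schema_data keyword arguments and that validate() takes a raise_exception flag, as in the released library; that part of core.py is not shown in this hunk.

from pykwalify.core import Core

schema = {
    "type": "map",
    "mapping": {
        "name": {"type": "str", "required": True},
        "age": {"type": "int", "range": {"min": 0}},
    },
}
data = {"name": "foo", "age": 42}

c = Core(source_data=data, schema_data=schema)
c.validate(raise_exception=True)  # raises SchemaError when validation fails
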
diff --git a/pykwalify/errors.py b/pykwalify/errors.py
new file mode 100644
index 0000000..c9f9b54
--- /dev/null
+++ b/pykwalify/errors.py
@@ -0,0 +1,237 @@
+# -*- coding: utf-8 -*-
+
+""" pyKwalify - errors.py """
+
+# python stdlib
+from pykwalify.compat import basestring
+
+retcodes = {
+ # PyKwalifyExit
+ 0: 'noerror',
+
+ # UnknownError
+ 1: 'unknownerror',
+
+ # SchemaError
+ # e.g. when a rule or the core finds an error
+ 2: 'schemaerror',
+
+ # CoreError
+ # e.g. when the core finds an error that is not a SchemaError
+ 3: 'coreerror',
+
+ # RuleError
+ # e.g. when the rule class finds an error that is not a SchemaError, similar to CoreError
+ 4: 'ruleerror',
+
+ # SchemaConflict
+ # e.g. when a schema conflict occurs
+ 5: 'schemaconflict',
+
+ # NotMappingError
+ # e.g. when a value is not a mapping when it was expected it should be
+    # e.g. when a value is not a mapping but was expected to be one
+
+ # NotSequenceError
+ # e.g. when a value is not a sequence when it was expected it should be
+    # e.g. when a value is not a sequence but was expected to be one
+}
+
+
+retnames = dict((v, k) for (k, v) in retcodes.items())
+
+
+class PyKwalifyException(RuntimeError):
+ """
+ """
+
+ def __init__(self, msg=None, error_key=None, retcode=None, path=None):
+ """
+ Arguments:
+ - `msg`: a string
+ - `error_key`: a unique string that makes it easier to identify what error it is
+ - `retcode`: an integer, defined in PyKwalify.errors.retcodes
+ """
+ self.msg = msg or ""
+ self.retcode = retcode or retnames['unknownerror']
+        self.retname = retcodes[self.retcode]
+ self.error_key = error_key
+ self.path = path or "/"
+
+ def __str__(self):
+ """
+ """
+ # <PyKwalifyException msg='foo bar' retcode=1>
+ # kwargs = []
+ # if self.msg:
+ # kwargs.append("msg='{0}'".format(self.msg))
+ # if self.retcode != retnames['noerror']:
+ # kwargs.append("retcode=%d" % self.retcode)
+ # if kwargs:
+ # kwargs.insert(0, '')
+ # return "<{0}{1}>".format(self.__class__.__name__, ' '.join(kwargs))
+
+ # <PyKwalifyException: error code 1: foo bar>
+ kwargs = []
+ if self.retcode != retnames['noerror']:
+ kwargs.append("error code {0}".format(self.retcode))
+ if self.msg:
+ kwargs.append(self.msg)
+ if kwargs:
+ kwargs.insert(0, '')
+ if self.path:
+ kwargs.append("Path: '{0}'".format(self.path))
+ return "<{0}{1}>".format(self.__class__.__name__, ': '.join(kwargs))
+
+ def __repr__(self):
+ """
+ """
+ kwargs = []
+ if self.msg:
+ kwargs.append("msg='{0}'".format(self.msg))
+ return "{0}({1})".format(self.__class__.__name__, ', '.join(kwargs))
+
+ def msg():
+ doc = """ """
+
+ def fget(self):
+ return self._msg
+
+ def fset(self, value):
+ assert isinstance(value, basestring), "argument is not string"
+ self._msg = value
+
+ return locals()
+ msg = property(**msg())
+
+ def retcode():
+ doc = """ """
+
+ def fget(self):
+ return self._retcode
+
+ def fset(self, value):
+ assert isinstance(value, int), "argument is not integer"
+ self._retcode = value
+
+ return locals()
+ retcode = property(**retcode())
+
+ def retname():
+ doc = """ """
+
+ def fget(self):
+ return self._retname
+
+ def fset(self, value):
+ assert isinstance(value, str), "argument is not string"
+ self._retname = value
+
+ return locals()
+ retname = property(**retname())
+
+
+class UnknownError(PyKwalifyException):
+ """
+ """
+ def __init__(self, *args, **kwargs):
+ """
+ """
+ assert 'retcode' not in kwargs, "keyword retcode implicitly defined"
+ super(self.__class__, self).__init__(
+ retcode=retnames['unknownerror'],
+ *args, **kwargs
+ )
+
+
+class SchemaError(PyKwalifyException):
+ """
+ """
+ class SchemaErrorEntry(object):
+ """
+ """
+ def __init__(self, msg, path, value, **kwargs):
+ """
+ """
+ self.msg = msg
+ self.path = path
+ self.value = value
+ for key, value in kwargs.items():
+ self.__setattr__(key, value)
+
+ def __repr__(self):
+ return self.msg.format(**self.__dict__)
+
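
To illustrate the __repr__ above: the message template is rendered with whatever keyword arguments were stored on the entry, so extra fields such as key become format placeholders. The values below are made up.

from pykwalify.errors import SchemaError

entry = SchemaError.SchemaErrorEntry(
    msg=u"Cannot find required key '{key}'. Path: '{path}'",
    path="/0",
    value={"last-name": "Foo"},
    key="family-name",
)
print(repr(entry))  # Cannot find required key 'family-name'. Path: '/0'
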
+ def __init__(self, *args, **kwargs):
+ """
+ """
+ assert "retcode" not in kwargs, "keyword retcode implicitly defined"
+ super(self.__class__, self).__init__(
+ retcode=retnames["schemaerror"],
+ *args, **kwargs
+ )
+
+
+class CoreError(PyKwalifyException):
+ """
+ """
+ def __init__(self, *args, **kwargs):
+ """
+ """
+ assert "retcode" not in kwargs, "keyword retcode implicitly defined"
+ super(self.__class__, self).__init__(
+ retcode=retnames["coreerror"],
+ *args, **kwargs
+ )
+
+
+class NotMappingError(PyKwalifyException):
+ """
+ """
+ def __init__(self, *args, **kwargs):
+ """
+ """
+ assert "retcode" not in kwargs, "keyword retcode implicitly defined"
+ super(self.__class__, self).__init__(
+ retcode=retnames['notmaperror'],
+ *args, **kwargs
+ )
+
+
+class NotSequenceError(PyKwalifyException):
+ """
+ """
+ def __init__(self, *args, **kwargs):
+ """
+ """
+ assert "retcode" not in kwargs, "keyword retcode implicitly defined"
+ super(self.__class__, self).__init__(
+ retcode=retnames['notsequenceerror'],
+ *args, **kwargs
+ )
+
+
+class RuleError(PyKwalifyException):
+ """
+ """
+ def __init__(self, *args, **kwargs):
+ """
+ """
+ assert "retcode" not in kwargs, "keyword retcode implicitly defined"
+ super(self.__class__, self).__init__(
+ retcode=retnames["ruleerror"],
+ *args, **kwargs
+ )
+
+
+class SchemaConflict(PyKwalifyException):
+ """
+ """
+ def __init__(self, *args, **kwargs):
+ """
+ """
+ assert "retcode" not in kwargs, "keyword retcode implicitly defined"
+ super(self.__class__, self).__init__(
+ retcode=retnames["schemaconflict"],
+ *args, **kwargs
+ )
diff --git a/pykwalify/rule.py b/pykwalify/rule.py
new file mode 100644
index 0000000..7ac2c9e
--- /dev/null
+++ b/pykwalify/rule.py
@@ -0,0 +1,1358 @@
+# -*- coding: utf-8 -*-
+
+""" pyKwalify - rule.py """
+
+# python stdlib
+import logging
+import re
+
+# pykwalify imports
+from pykwalify.compat import basestring
+from pykwalify.errors import SchemaConflict, RuleError
+from pykwalify.types import (
+ DEFAULT_TYPE,
+ is_bool,
+ is_builtin_type,
+ is_collection_type,
+ is_number,
+ is_string,
+ mapping_aliases,
+ sequence_aliases,
+ type_class,
+)
+
+log = logging.getLogger(__name__)
+
+
+class Rule(object):
+ """ Rule class that handles a rule constraint """
+
+ def __init__(self, schema=None, parent=None, strict_rule_validation=False):
+ self._allowempty_map = None
+ self._assertion = None
+ self._default = None
+ self._desc = None
+ self._enum = None
+ self._example = None
+ self._extensions = None
+ self._format = None
+ self._func = None
+ self._ident = None
+ self._include_name = None
+ self._length = None
+ self._map_regex_rule = None
+ self._mapping = None
+ # Possible values: [any, all, *]
+ self._matching = "any"
+ self._matching_rule = "any"
+ self._name = None
+ self._nullable = True
+ self._parent = parent
+ self._pattern = None
+ self._pattern_regexp = None
+ self._range = None
+ self._regex_mappings = None
+ self._required = False
+ self._schema = schema
+ self._schema_str = schema
+ self._sequence = None
+ self.strict_rule_validation = strict_rule_validation
+ self._type = None
+ self._type_class = None
+ self._unique = None
+ self._version = None
+
+ if isinstance(schema, dict):
+ self.init(schema, "")
+
+ @property
+ def allowempty_map(self):
+ return self._allowempty_map
+
+ @allowempty_map.setter
+ def allowempty_map(self, value):
+ self._allowempty_map = value
+
+ @property
+ def assertion(self):
+ return self._assertion
+
+ @assertion.setter
+ def assertion(self, value):
+ self._assertion = value
+
+ @property
+ def default(self):
+ return self._default
+
+ @default.setter
+ def default(self, value):
+ self._default = value
+
+ @property
+ def desc(self):
+ return self._desc
+
+ @desc.setter
+ def desc(self, value):
+ self._desc = value
+
+ @property
+ def enum(self):
+ return self._enum
+
+ @enum.setter
+ def enum(self, value):
+ self._enum = value
+
+ @property
+ def example(self):
+ return self._example
+
+ @example.setter
+ def example(self, value):
+ self._example = value
+
+ @property
+ def extensions(self):
+ return self._extensions
+
+ @extensions.setter
+ def extensions(self, value):
+ self._extensions = value
+
+ @property
+ def format(self):
+ return self._format
+
+ @format.setter
+ def format(self, value):
+ self._format = value
+
+ @property
+ def func(self):
+ return self._func
+
+ @func.setter
+ def func(self, value):
+ self._func = value
+
+ @property
+ def ident(self):
+ return self._ident
+
+ @ident.setter
+ def ident(self, value):
+ self._ident = value
+
+ @property
+ def include_name(self):
+ return self._include_name
+
+ @include_name.setter
+ def include_name(self, value):
+ self._include_name = value
+
+ @property
+ def length(self):
+ return self._length
+
+ @length.setter
+ def length(self, value):
+ self._length = value
+
+ @property
+ def map_regex_rule(self):
+ return self._map_regex_rule
+
+ @map_regex_rule.setter
+ def map_regex_rule(self, value):
+ self._map_regex_rule = value
+
+ @property
+ def mapping(self):
+ return self._mapping
+
+ @mapping.setter
+ def mapping(self, value):
+ self._mapping = value
+
+ @property
+ def matching(self):
+ return self._matching
+
+ @matching.setter
+ def matching(self, value):
+ self._matching = value
+
+ @property
+ def matching_rule(self):
+ return self._matching_rule
+
+ @matching_rule.setter
+ def matching_rule(self, value):
+ self._matching_rule = value
+
+ @property
+ def name(self):
+ return self._name
+
+ @name.setter
+ def name(self, value):
+ self._name = value
+
+ @property
+ def nullable(self):
+ return self._nullable
+
+ @nullable.setter
+ def nullable(self, value):
+ self._nullable = value
+
+ @property
+ def parent(self):
+ return self._parent
+
+ @parent.setter
+ def parent(self, value):
+ self._parent = value
+
+ @property
+ def pattern(self):
+ return self._pattern
+
+ @pattern.setter
+ def pattern(self, value):
+ self._pattern = value
+
+ @property
+ def pattern_regexp(self):
+ return self._pattern_regexp
+
+ @pattern_regexp.setter
+ def pattern_regexp(self, value):
+ self._pattern_regexp = value
+
+ @property
+ def range(self):
+ return self._range
+
+ @range.setter
+ def range(self, value):
+ self._range = value
+
+ @property
+ def regex_mappings(self):
+ return self._regex_mappings
+
+ @regex_mappings.setter
+ def regex_mappings(self, value):
+ self._regex_mappings = value
+
+ @property
+ def required(self):
+ return self._required
+
+ @required.setter
+ def required(self, value):
+ self._required = value
+
+ @property
+ def schema(self):
+ return self._schema
+
+ @schema.setter
+ def schema(self, value):
+ self._schema = value
+
+ @property
+ def schema_str(self):
+ return self._schema_str
+
+ @schema_str.setter
+ def schema_str(self, value):
+ self._schema_str = value
+
+ @property
+ def sequence(self):
+ return self._sequence
+
+ @sequence.setter
+ def sequence(self, value):
+ self._sequence = value
+
+ @property
+ def type(self):
+ return self._type
+
+ @type.setter
+ def type(self, value):
+ self._type = value
+
+ @property
+ def type_class(self):
+ return self._type_class
+
+ @type_class.setter
+ def type_class(self, value):
+ self._type_class = value
+
+ @property
+ def unique(self):
+ return self._unique
+
+ @unique.setter
+ def unique(self, value):
+ self._unique = value
+
+ @property
+ def version(self):
+ return self._version
+
+ @version.setter
+ def version(self, value):
+ self._version = value
+
+ def __str__(self):
+ return "Rule: {0}".format(str(self.schema_str))
+
+ def keywords(self):
+ """
+ Returns a list of all keywords that this rule object has defined.
+        A keyword is considered defined if its value evaluates to a truthy value.
+ """
+ defined_keywords = [
+ ('allowempty_map', 'allowempty_map'),
+ ('assertion', 'assertion'),
+ ('default', 'default'),
+ ('class', 'class'),
+ ('desc', 'desc'),
+ ('enum', 'enum'),
+ ('example', 'example'),
+ ('extensions', 'extensions'),
+ ('format', 'format'),
+ ('func', 'func'),
+ ('ident', 'ident'),
+ ('include_name', 'include'),
+ ('length', 'length'),
+ ('map_regex_rule', 'map_regex_rule'),
+ ('mapping', 'mapping'),
+ ('matching', 'matching'),
+ ('matching_rule', 'matching_rule'),
+ ('name', 'name'),
+            ('nullable', 'nullable'),
+ ('parent', 'parent'),
+ ('pattern', 'pattern'),
+ ('pattern_regexp', 'pattern_regexp'),
+ ('range', 'range'),
+ ('regex_mappings', 'regex_mappings'),
+ ('required', 'required'),
+ ('schema', 'schema'),
+ ('schema_str', 'schema_str'),
+ ('sequence', 'sequence'),
+ ('type', 'type'),
+ ('type_class', 'type_class'),
+ ('unique', 'unique'),
+ ('version', 'version'),
+ ]
+ found_keywords = []
+
+ for var_name, keyword_name in defined_keywords:
+ if getattr(self, var_name, None):
+ found_keywords.append(keyword_name)
+
+ return found_keywords
+
+ def init(self, schema, path):
+ """
+ """
+ log.debug(u"Init schema: %s", schema)
+
+ include = schema.get("include")
+
+        # Check if this item is an include. If so, store the include name and return; the partial schema is resolved later by the core
+ if include:
+ log.debug(u"Found include tag...")
+ self.include_name = include
+ return
+
+ t = None
+ rule = self
+
+ if schema is not None:
+ if "type" not in schema:
+                # Mapping and sequence do not need explicit type definitions
+ if any(sa in schema for sa in sequence_aliases):
+ t = "seq"
+ self.init_type_value(t, rule, path)
+ elif any(ma in schema for ma in mapping_aliases):
+ t = "map"
+ self.init_type_value(t, rule, path)
+ else:
+ t = DEFAULT_TYPE
+ self.type = t
+ else:
+ if not is_string(schema["type"]):
+ raise RuleError(
+ msg=u"Key 'type' in schema rule is not a string type (found %s)" % type(schema["type"]).__name__,
+ error_key=u"type.not_string",
+ path=path,
+ )
+
+ self.type = schema["type"]
+
+ self.schema_str = schema
+
+ if not t:
+ t = schema["type"]
+ self.init_type_value(t, rule, path)
+
+ func_mapping = {
+ "allowempty": self.init_allow_empty_map,
+ "assert": self.init_assert_value,
+ "class": lambda x, y, z: (),
+ "default": self.init_default_value,
+ "desc": self.init_desc_value,
+ "enum": self.init_enum_value,
+ "example": self.init_example,
+ "extensions": self.init_extensions,
+ "format": self.init_format_value,
+ "func": self.init_func,
+ "ident": self.init_ident_value,
+ "length": self.init_length_value,
+ "map": self.init_mapping_value,
+ "mapping": self.init_mapping_value,
+ "matching": self.init_matching,
+ "matching-rule": self.init_matching_rule,
+ "name": self.init_name_value,
+ "nul": self.init_nullable_value,
+ "nullable": self.init_nullable_value,
+ "pattern": self.init_pattern_value,
+ "range": self.init_range_value,
+ "req": self.init_required_value,
+ "required": self.init_required_value,
+ "seq": self.init_sequence_value,
+ "sequence": self.init_sequence_value,
+ "type": lambda x, y, z: (),
+ "unique": self.init_unique_value,
+ "version": self.init_version,
+ }
+
+ for k, v in schema.items():
+ if k in func_mapping:
+ func_mapping[k](v, rule, path)
+ elif k.startswith("schema;"):
+                # Schema tag is only allowed on the top level of the schema file
+ log.debug(u"Found schema tag...")
+ raise RuleError(
+ msg=u"Schema is only allowed on top level of schema file",
+ error_key=u"schema.not.toplevel",
+ path=path,
+ )
+ else:
+ raise RuleError(
+ msg=u"Unknown key: {0} found".format(k),
+ error_key=u"key.unknown",
+ path=path,
+ )
+
+ self.check_conflicts(schema, rule, path)
+
+ self.check_type_keywords(schema, rule, path)
+
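
A short sketch of the type inference done in init above: when no explicit type is given, a sequence or mapping alias decides the type, otherwise the default scalar type is used. This mirrors the branch structure only and is not the library code.

DEFAULT_TYPE = "str"
sequence_aliases = ["sequence", "seq"]
mapping_aliases = ["map", "mapping"]

def infer_type(schema):
    # An explicit 'type' always wins; aliases are only consulted when it is missing.
    if "type" in schema:
        return schema["type"]
    if any(sa in schema for sa in sequence_aliases):
        return "seq"
    if any(ma in schema for ma in mapping_aliases):
        return "map"
    return DEFAULT_TYPE

print(infer_type({"sequence": [{"type": "str"}]}))         # seq
print(infer_type({"mapping": {"name": {"type": "str"}}}))  # map
print(infer_type({"pattern": ".+"}))                       # str
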
+ def init_format_value(self, v, rule, path):
+ log.debug(u"Init format value : %s", path)
+
+ if is_string(v):
+ self._format = [v]
+ elif isinstance(v, list):
+ valid = True
+ for date_format in v:
+ if not isinstance(date_format, basestring):
+ valid = False
+
+ if valid:
+ self._format = v
+ else:
+ raise RuleError(
+ msg=u"All values in format list must be strings",
+ error_key=u"format.not_string",
+ path=path,
+ )
+ else:
+ raise RuleError(
+                msg=u"Value of format keyword: '{}' must be a string or a list of strings".format(v),
+ error_key=u"format.not_string",
+ path=path,
+ )
+
+ valid_types = ("date", )
+
+ # Format is only supported when used with "type=date"
+ if self._type not in valid_types:
+ raise RuleError(
+ msg="Keyword format is only allowed when used with the following types: {0}".format(valid_types),
+ error_key=u"format.not_used_with_correct_type",
+ path=path,
+ )
+
+ def init_version(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init version value : {0}".format(path))
+
+ self._version = str(v)
+
+ def init_example(self, v, rule, path):
+ log.debug(u'Init example value : {0}'.format(path))
+
+ if not is_string(v):
+ raise RuleError(
+ msg=u"Value: {0} for keyword example must be a string".format(v),
+ error_key=u"example.not_string",
+ path=path,
+ )
+
+        self.example = v
+
+ def init_length_value(self, v, rule, path):
+ log.debug(u'Init length value : {0}'.format(path))
+
+ supported_types = ["str", "int", "float", "number", "map", "seq"]
+
+ if not isinstance(v, dict):
+ raise RuleError(
+ msg=u"Length value is not a dict type: '{0}'".format(v),
+ error_key=u"length.not_map",
+ path=path,
+ )
+
+ if self.type not in supported_types:
+ raise RuleError(
+ msg=u"Length value type: '{0}' is not a supported type".format(self.type),
+ error_key=u"length.not_supported_type",
+ path=path,
+ )
+
+ # dict that should contain min, max, min-ex, max-ex keys
+ self.length = v
+
+ # This should validate that only min, max, min-ex, max-ex exists in the dict
+ for k, v in self.length.items():
+ if k not in ["max", "min", "max-ex", "min-ex"]:
+ raise RuleError(
+ msg=u"Unknown key: '{0}' found in length keyword".format(k),
+ error_key=u"length.unknown_key",
+ path=path,
+ )
+
+ if "max" in self.length and "max-ex" in self.length:
+ raise RuleError(
+ msg=u"'max' and 'max-ex' can't be used in the same length rule",
+ error_key=u"length.max_duplicate_keywords",
+ path=path,
+ )
+
+ if "min" in self.length and "min-ex" in self.length:
+ raise RuleError(
+ msg=u"'min' and 'min-ex' can't be used in the same length rule",
+ error_key=u"length.min_duplicate_keywords",
+ path=path,
+ )
+
+ max = self.length.get("max")
+ min = self.length.get("min")
+ max_ex = self.length.get("max-ex")
+ min_ex = self.length.get("min-ex")
+
+ if max is not None and not is_number(max) or is_bool(max):
+ raise RuleError(
+                msg=u"Value: '{0}' for 'max' keyword is not a number".format(max),
+ error_key=u"length.max.not_number",
+ path=path,
+ )
+
+ if min is not None and not is_number(min) or is_bool(min):
+ raise RuleError(
+                msg=u"Value: '{0}' for 'min' keyword is not a number".format(min),
+ error_key=u"length.min.not_number",
+ path=path,
+ )
+
+ if max_ex is not None and not is_number(max_ex) or is_bool(max_ex):
+ raise RuleError(
+                msg=u"Value: '{0}' for 'max-ex' keyword is not a number".format(max_ex),
+ error_key=u"length.max_ex.not_number",
+ path=path,
+ )
+
+ if min_ex is not None and not is_number(min_ex) or is_bool(min_ex):
+ raise RuleError(
+                msg=u"Value: '{0}' for 'min-ex' keyword is not a number".format(min_ex),
+ error_key=u"length.min_ex.not_number",
+ path=path,
+ )
+
+ # only numbers allow negative lengths
+        # string, map and seq require non-negative lengths
+ if self.type not in ["int", "float", "number"]:
+ if min is not None and min < 0:
+ raise RuleError(
+ msg=u"Value for 'min' can't be negative in case of type {0}.".format(self.type),
+ error_key=u"length.min_negative",
+ path=path,
+ )
+ elif min_ex is not None and min_ex < 0:
+ raise RuleError(
+ msg=u"Value for 'min-ex' can't be negative in case of type {0}.".format(self.type),
+ error_key=u"length.min-ex_negative",
+ path=path,
+ )
+ if max is not None and max < 0:
+ raise RuleError(
+ msg=u"Value for 'max' can't be negative in case of type {0}.".format(self.type),
+ error_key=u"length.max_negative",
+ path=path,
+ )
+ elif max_ex is not None and max_ex < 0:
+ raise RuleError(
+ msg=u"Value for 'max-ex' can't be negative in case of type {0}.".format(self.type),
+ error_key=u"length.max-ex_negative",
+ path=path,
+ )
+
+ if max is not None:
+ if min is not None and max < min:
+ raise RuleError(
+                    msg=u"Value for 'max' can't be less than value for 'min'. {0} < {1}".format(max, min),
+ error_key=u"length.max_lt_min",
+ path=path,
+ )
+ elif min_ex is not None and max <= min_ex:
+ raise RuleError(
+                    msg=u"Value for 'max' can't be less than value for 'min-ex'. {0} <= {1}".format(max, min_ex),
+ error_key=u"length.max_le_min-ex",
+ path=path,
+ )
+ elif max_ex is not None:
+ if min is not None and max_ex < min:
+ raise RuleError(
+                    msg=u"Value for 'max-ex' can't be less than value for 'min'. {0} < {1}".format(max_ex, min),
+ error_key=u"length.max-ex_le_min",
+ path=path,
+ )
+ elif min_ex is not None and max_ex <= min_ex:
+ raise RuleError(
+                    msg=u"Value for 'max-ex' can't be less than value for 'min-ex'. {0} <= {1}".format(max_ex, min_ex),
+ error_key=u"length.max-ex_le_min-ex",
+ path=path,
+ )
+
+ def init_func(self, v, rule, path):
+ """
+ """
+ if not is_string(v):
+ raise RuleError(
+ msg=u"Value: {0} for func keyword must be a string".format(v),
+ error_key=u"func.notstring",
+ path=path,
+ )
+
+ self.func = v
+
+ def init_extensions(self, v, rule, path):
+ """
+ """
+ if not isinstance(v, list):
+ raise RuleError(
+ msg=u"Extension definition should be a list",
+ error_key=u"extension.not_list",
+ path=path,
+ )
+
+ # TODO: Add limitation that this keyword can only be used at the top level of the file
+
+ self.extensions = v
+
+ def init_matching_rule(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init matching-rule: %s", path)
+ log.debug(u"%s %s", v, rule)
+
+        # Verify that the provided rule is one of the allowed values
+ allowed = ["any", "all"]
+        # ["none", "one"] are currently awaiting proper implementation
+ if v not in allowed:
+ raise RuleError(
+ msg=u"Specified rule in key: {0} is not part of allowed rule set : {1}".format(v, allowed),
+ error_key=u"matching_rule.not_allowed",
+ path=path,
+ )
+ else:
+ self.matching_rule = v
+
+ def init_allow_empty_map(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init allow empty value: %s", path)
+ log.debug(u"Type: %s : %s", v, rule)
+
+ self.allowempty_map = v
+
+ def init_type_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init type value : %s", path)
+ log.debug(u"Type: %s %s", v, rule)
+
+ if v is None:
+ v = DEFAULT_TYPE
+
+ self.type = v
+ self.type_class = type_class(v)
+
+ if not is_builtin_type(self.type):
+ raise RuleError(
+ msg=u"Type: {0} is not any of the known types".format(self.type),
+ error_key=u"type.unknown",
+ path=path,
+ )
+
+ def init_matching(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init matching rule : %s", path)
+
+ valid_values = ["any", "all", "*"]
+
+ if str(v) not in valid_values:
+ raise RuleError(
+ msg=u"matching value: {0} is not one of {1}".format(str(v), valid_values),
+ error_key=u"matching_rule.invalid",
+ path=path,
+ )
+
+ self.matching = str(v)
+
+ def init_name_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init name value : %s", path)
+
+ if not is_string(v):
+ raise RuleError(
+ msg=u"Value: {0} for keyword name must be a string".format(v),
+ error_key=u"name.not_string",
+ path=path,
+ )
+
+ self.name = v
+
+ def init_nullable_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init nullable value : %s", path)
+
+ if not isinstance(v, bool):
+ raise RuleError(
+ msg=u"Value: '{0}' for nullable keyword must be a boolean".format(v),
+ error_key=u"nullable.not_bool",
+ path=path,
+ )
+
+ self.nullable = v
+
+ def init_desc_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init descr value : %s", path)
+
+ if not is_string(v):
+ raise RuleError(
+ msg=u"Value: {0} for keyword desc must be a string".format(v),
+ error_key=u"desc.not_string",
+ path=path,
+ )
+
+ self.desc = v
+
+ def init_required_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init required value : %s", path)
+
+ if not is_bool(v):
+ raise RuleError(
+ msg=u"Value: '{0}' for required keyword must be a boolean".format(v),
+ error_key=u"required.not_bool",
+ path=path,
+ )
+ self.required = v
+
+ def init_pattern_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init pattern value : %s", path)
+
+ if not is_string(v):
+ raise RuleError(
+ msg=u"Value of pattern keyword: '{0}' is not a string".format(v),
+ error_key=u"pattern.not_string",
+ path=path,
+ )
+
+ self.pattern = v
+
+ if self.schema_str["type"] == "map":
+ raise RuleError(
+ msg=u"Keyword pattern is not allowed inside map",
+ error_key=u"pattern.not_allowed_in_map",
+ path=path,
+ )
+
+ # TODO: Some form of validation of the regexp? it exists in the source
+
+ try:
+ self.pattern_regexp = re.compile(self.pattern)
+ except Exception:
+ raise RuleError(
+                msg=u"Syntax error when compiling regex pattern: {0}".format(self.pattern),
+ error_key=u"pattern.syntax_error",
+ path=path,
+ )
+
+ def init_enum_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init enum value : %s", path)
+
+ if not isinstance(v, list):
+ raise RuleError(
+ msg=u"Enum is not a sequence",
+ error_key=u"enum.not_seq",
+ path=path,
+ )
+ self.enum = v
+
+ if is_collection_type(self.type):
+ raise RuleError(
+ msg=u"Enum is not a scalar",
+ error_key=u"enum.not_scalar",
+ path=path,
+ )
+
+ lookup = set()
+ for item in v:
+ if not isinstance(item, self.type_class):
+ raise RuleError(
+ msg=u"Item: '{0}' in enum is not of correct class type: '{1}'".format(item, self.type_class),
+ error_key=u"enum.type.unmatch",
+ path=path,
+ )
+
+ if item in lookup:
+ raise RuleError(
+ msg=u"Duplicate items: '{0}' found in enum".format(item),
+ error_key=u"enum.duplicate_items",
+ path=path,
+ )
+
+ lookup.add(item)
+
+ def init_assert_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init assert value : %s", path)
+
+ if not is_string(v):
+ raise RuleError(
+ msg=u"Value: '{0}' for keyword 'assert' is not a string".format(v),
+ error_key=u"assert.not_str",
+ path=path,
+ )
+
+ self.assertion = v
+
+ if any(k in self.assertion for k in (';', 'import', '__import__')):
+ raise RuleError(
+                msg=u"Value: '{assertion}' contains invalid content that is not allowed to be present in assertion keyword".format(assertion=self.assertion),
+ error_key=u"assert.unsupported_content",
+ path=path,
+ )
+
+ def init_range_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init range value : %s", path)
+
+ supported_types = ["str", "int", "float", "number", "map", "seq"]
+
+ if not isinstance(v, dict):
+ raise RuleError(
+ msg=u"Range value is not a dict type: '{0}'".format(v),
+ error_key=u"range.not_map",
+ path=path,
+ )
+
+ if self.type not in supported_types:
+ raise RuleError(
+ msg=u"Range value type: '{0}' is not a supported type".format(self.type),
+ error_key=u"range.not_supported_type",
+ path=path,
+ )
+
+ # dict that should contain min, max, min-ex, max-ex keys
+ self.range = v
+
+ # This should validate that only min, max, min-ex, max-ex exists in the dict
+ for k, v in self.range.items():
+ if k not in ["max", "min", "max-ex", "min-ex"]:
+ raise RuleError(
+ msg=u"Unknown key: '{0}' found in range keyword".format(k),
+ error_key=u"range.unknown_key",
+ path=path,
+ )
+
+ if "max" in self.range and "max-ex" in self.range:
+ raise RuleError(
+ msg=u"'max' and 'max-ex' can't be used in the same range rule",
+ error_key=u"range.max_duplicate_keywords",
+ path=path,
+ )
+
+ if "min" in self.range and "min-ex" in self.range:
+ raise RuleError(
+ msg=u"'min' and 'min-ex' can't be used in the same range rule",
+ error_key=u"range.min_duplicate_keywords",
+ path=path,
+ )
+
+ max = self.range.get("max")
+ min = self.range.get("min")
+ max_ex = self.range.get("max-ex")
+ min_ex = self.range.get("min-ex")
+
+ if max is not None and not is_number(max) or is_bool(max):
+ raise RuleError(
+                msg=u"Value: '{0}' for 'max' keyword is not a number".format(max),
+ error_key=u"range.max.not_number",
+ path=path,
+ )
+
+ if min is not None and not is_number(min) or is_bool(min):
+ raise RuleError(
+                msg=u"Value: '{0}' for 'min' keyword is not a number".format(min),
+ error_key=u"range.min.not_number",
+ path=path,
+ )
+
+ if max_ex is not None and not is_number(max_ex) or is_bool(max_ex):
+ raise RuleError(
+                msg=u"Value: '{0}' for 'max-ex' keyword is not a number".format(max_ex),
+ error_key=u"range.max_ex.not_number",
+ path=path,
+ )
+
+ if min_ex is not None and not is_number(min_ex) or is_bool(min_ex):
+ raise RuleError(
+                msg=u"Value: '{0}' for 'min-ex' keyword is not a number".format(min_ex),
+ error_key=u"range.min_ex.not_number",
+ path=path,
+ )
+
+ # only numbers allow negative ranges
+        # string, map and seq require non-negative ranges
+ if self.type not in ["int", "float", "number"]:
+ if min is not None and min < 0:
+ raise RuleError(
+ msg=u"Value for 'min' can't be negative in case of type {0}.".format(self.type),
+ error_key=u"range.min_negative",
+ path=path,
+ )
+ elif min_ex is not None and min_ex < 0:
+ raise RuleError(
+ msg=u"Value for 'min-ex' can't be negative in case of type {0}.".format(self.type),
+ error_key=u"range.min-ex_negative",
+ path=path,
+ )
+ if max is not None and max < 0:
+ raise RuleError(
+ msg=u"Value for 'max' can't be negative in case of type {0}.".format(self.type),
+ error_key=u"range.max_negative",
+ path=path,
+ )
+ elif max_ex is not None and max_ex < 0:
+ raise RuleError(
+ msg=u"Value for 'max-ex' can't be negative in case of type {0}.".format(self.type),
+ error_key=u"range.max-ex_negative",
+ path=path,
+ )
+
+ if max is not None:
+ if min is not None and max < min:
+ raise RuleError(
+                    msg=u"Value for 'max' can't be less than value for 'min'. {0} < {1}".format(max, min),
+ error_key=u"range.max_lt_min",
+ path=path,
+ )
+ elif min_ex is not None and max <= min_ex:
+ raise RuleError(
+                    msg=u"Value for 'max' can't be less than value for 'min-ex'. {0} <= {1}".format(max, min_ex),
+ error_key=u"range.max_le_min-ex",
+ path=path,
+ )
+ elif max_ex is not None:
+ if min is not None and max_ex < min:
+ raise RuleError(
+                    msg=u"Value for 'max-ex' can't be less than value for 'min'. {0} < {1}".format(max_ex, min),
+ error_key=u"range.max-ex_le_min",
+ path=path,
+ )
+ elif min_ex is not None and max_ex <= min_ex:
+ raise RuleError(
+                    msg=u"Value for 'max-ex' can't be less than value for 'min-ex'. {0} <= {1}".format(max_ex, min_ex),
+ error_key=u"range.max-ex_le_min-ex",
+ path=path,
+ )
+
+ def init_ident_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init ident value : %s", path)
+
+ if v is None or not is_bool(v):
+ raise RuleError(
+ msg=u"Value: '{0}' of 'ident' is not a boolean value".format(v),
+ error_key=u"ident.not_bool",
+ path=path,
+ )
+
+ self.ident = bool(v)
+ self.required = True
+
+ if is_collection_type(self.type):
+ raise RuleError(
+ msg=u"Value: '{0}' of 'ident' is not a scalar value".format(v),
+ error_key=u"ident.not_scalar",
+ path=path,
+ )
+
+ if path == "":
+ raise RuleError(
+ msg=u"Keyword 'ident' can't be on root level of schema",
+ error_key=u"ident.not_on_root_level",
+ path=path,
+ )
+
+ if self.parent is None or not self.parent.type == "map":
+ raise RuleError(
+                msg=u"Keyword 'ident' can only be used inside a 'map'",
+ error_key=u"ident.not_in_map",
+ path=path,
+ )
+
+ def init_unique_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init unique value : %s", path)
+
+ if not is_bool(v):
+ raise RuleError(
+ msg=u"Value: '{0}' for 'unique' keyword is not boolean".format(v),
+ error_key=u"unique.not_bool",
+ path=path,
+ )
+
+ self.unique = v
+
+ if is_collection_type(self.type):
+ raise RuleError(
+ msg=u"Type of the value: '{0}' for 'unique' keyword is not a scalar type".format(self.type),
+ error_key=u"unique.not_scalar",
+ path=path,
+ )
+ if path == "":
+ raise RuleError(
+ msg=u"Keyword 'unique' can't be on root level of schema",
+ error_key=u"unique.not_on_root_level",
+ path=path,
+ )
+
+ def init_sequence_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init sequence value : %s", path)
+
+ if v is not None and not isinstance(v, list):
+ raise RuleError(
+ msg=u"Sequence keyword is not a list",
+ error_key=u"sequence.not_seq",
+ path=path,
+ )
+
+ self.sequence = v
+
+ if self.sequence is None or len(self.sequence) == 0:
+ raise RuleError(
+ msg=u"Sequence contains 0 elements",
+ error_key=u"sequence.no_elements",
+ path=path,
+ )
+
+ tmp_seq = []
+
+ for i, e in enumerate(self.sequence):
+ elem = e or {}
+
+ rule = Rule(None, self)
+ rule.init(elem, u"{0}/sequence/{1}".format(path, i))
+
+ tmp_seq.append(rule)
+
+ self.sequence = tmp_seq
+
+ return rule
+
+ def init_mapping_value(self, v, rule, path):
+ """
+ """
+ # Check for duplicate use of 'map' and 'mapping'
+ if self.mapping:
+ raise RuleError(
+ msg=u"Keywords 'map' and 'mapping' can't be used on the same level",
+ error_key=u"mapping.duplicate_keywords",
+ path=path,
+ )
+
+ log.debug(u"Init mapping value : %s", path)
+
+ if v is not None and not isinstance(v, dict):
+ raise RuleError(
+ msg=u"Value for keyword 'map/mapping' is not a dict",
+ error_key=u"mapping.not_dict",
+ path=path,
+ )
+
+ if v is None or len(v) == 0:
+ raise RuleError(
+                msg=u"Mapping does not contain any elements",
+ error_key=u"mapping.no_elements",
+ path=path,
+ )
+
+ self.mapping = {}
+ self.regex_mappings = []
+
+ for k, v in v.items():
+ if v is None:
+ v = {}
+
+ # Check if this is a regex rule. Handle specially
+ if k.startswith("regex;") or k.startswith("re;"):
+ log.debug(u"Found regex map rule")
+ regex = k.split(";", 1)
+ if len(regex) != 2:
+ raise RuleError(
+ msg=u"Value: '{0}' for keyword regex is malformed".format(k),
+ error_key=u"mapping.regex.malformed",
+ path=path,
+ )
+ else:
+ regex = regex[1]
+ try:
+ re.compile(regex)
+ except Exception as e:
+ log.debug(e)
+ raise RuleError(
+ msg=u"Unable to compile regex '{0}'".format(regex),
+ error_key=u"mapping.regex.compile_error",
+ path=path,
+ )
+
+ regex_rule = Rule(None, self)
+ regex_rule.init(v, u"{0}/mapping;regex/{1}".format(path, regex[1:-1]))
+ regex_rule.map_regex_rule = regex[1:-1]
+ self.regex_mappings.append(regex_rule)
+ self.mapping[k] = regex_rule
+ else:
+ rule = Rule(None, self)
+ rule.init(v, u"{0}/mapping/{1}".format(path, k))
+ self.mapping[k] = rule
+
+ return rule
+
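
The regex;(...) keys parsed above pair with the matching logic in core.py earlier in this diff; a schema that uses them could look like the following. The key patterns are invented for illustration.

from pykwalify.rule import Rule

schema = {
    "type": "map",
    "matching-rule": "any",  # a data key must match at least one regex
    "mapping": {
        "regex;([0-9]+)": {"type": "int"},
        "regex;([a-z]+)": {"type": "str"},
    },
}

rule = Rule(schema=schema)
print([r.map_regex_rule for r in rule.regex_mappings])  # ['[0-9]+', '[a-z]+']
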
+ def init_default_value(self, v, rule, path):
+ """
+ """
+ log.debug(u"Init default value : %s", path)
+ self.default = v
+
+ if is_collection_type(self.type):
+ raise RuleError(
+ msg=u"Value: {0} for keyword 'default' is not a scalar type".format(v),
+ error_key=u"default.not_scalar",
+ path=path,
+ )
+
+ if self.type == "map" or self.type == "seq":
+ raise RuleError(
+ msg=u"Value: {0} for keyword 'default' is not a scalar type".format(v),
+ error_key=u"default.not_scalar",
+ path=path,
+ )
+
+ if not isinstance(v, self.type_class):
+ raise RuleError(
+ msg=u"Types do not match: '{0}' --> '{1}'".format(v, self.type_class),
+ error_key=u"default.type.unmatch",
+ path=path,
+ )
+
+ def check_type_keywords(self, schema, rule, path):
+ """
+ All supported keywords:
+ - allowempty_map
+ - assertion
+ - class
+ - date
+ - default
+ - desc
+ - enum
+ - example
+ - extensions
+ - func
+ - ident
+ - include_name
+ - map_regex_rule
+ - mapping
+ - matching
+ - matching_rule
+ - name
+ - nullable
+ - pattern
+ - pattern_regexp
+ - range
+ - regex_mappings
+ - required
+ - schema
+ - sequence
+ - type
+ - type_class
+ - unique
+ - version
+ """
+ if not self.strict_rule_validation:
+ return
+
+ global_keywords = ['type', 'desc', 'example', 'extensions', 'name', 'nullable', 'version', 'func', 'include']
+ all_allowed_keywords = {
+ 'str': global_keywords + ['default', 'pattern', 'range', 'enum', 'required', 'unique', 'req'],
+ 'int': global_keywords + ['default', 'range', 'enum', 'required', 'unique'],
+ 'float': global_keywords + ['default', 'enum', 'range', 'required'],
+ 'number': global_keywords + ['default', 'enum'],
+ 'bool': global_keywords + ['default', 'enum'],
+ 'map': global_keywords + ['allowempty_map', 'mapping', 'map', 'allowempty', 'required', 'matching-rule', 'range', 'class'],
+ 'seq': global_keywords + ['sequence', 'seq', 'required', 'range', 'matching'],
+ 'sequence': global_keywords + ['sequence', 'seq', 'required'],
+ 'mapping': global_keywords + ['mapping', 'seq', 'required'],
+ 'timestamp': global_keywords + ['default', 'enum'],
+ 'date': global_keywords + ['default', 'enum'],
+ 'symbol': global_keywords + ['default', 'enum'],
+ 'scalar': global_keywords + ['default', 'enum'],
+ 'text': global_keywords + ['default', 'enum', 'pattern'],
+ 'any': global_keywords + ['default', 'enum'],
+ 'enum': global_keywords + ['default', 'enum'],
+ 'none': global_keywords + ['default', 'enum', 'required'],
+ }
+ rule_type = schema.get('type')
+ if not rule_type:
+ # Special cases for the "shortcut methods"
+ if 'sequence' in schema or 'seq' in schema:
+ rule_type = 'sequence'
+ elif 'mapping' in schema or 'map' in schema:
+ rule_type = 'mapping'
+
+ allowed_keywords = all_allowed_keywords.get(rule_type)
+ if not allowed_keywords and 'sequence' not in schema and 'mapping' not in schema and 'seq' not in schema and 'map' not in schema:
+ raise RuleError('No allowed keywords found for type: {0}'.format(rule_type))
+
+ for k, v in schema.items():
+ if k not in allowed_keywords:
+ raise RuleError('Keyword "{0}" is not supported for type: "{1}" '.format(k, rule_type))
+
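
Strict keyword checking above only runs when the rule is created with strict_rule_validation=True; below is a small sketch of the intended effect, using an int rule with a keyword that is not on its allowed list.

from pykwalify.errors import RuleError
from pykwalify.rule import Rule

schema = {"type": "int", "pattern": "[0-9]+"}  # 'pattern' is not allowed for int

Rule(schema=schema)  # lenient by default: the extra keyword is accepted

try:
    Rule(schema=schema, strict_rule_validation=True)
except RuleError as e:
    print(e)  # reports that keyword "pattern" is not supported for type "int"
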
+ def check_conflicts(self, schema, rule, path):
+ """
+ """
+ log.debug(u"Checking for conflicts : %s", path)
+
+ if self.type == "seq":
+ if all(sa not in schema for sa in sequence_aliases):
+ raise SchemaConflict(
+ msg="Type is sequence but no sequence alias found on same level",
+ error_key=u"seq.no_sequence",
+ path=path,
+ )
+
+ if self.enum is not None:
+ raise SchemaConflict(
+ msg="Sequence and enum can't be on the same level in the schema",
+ error_key=u"seq.conflict.enum",
+ path=path,
+ )
+
+ if self.pattern is not None:
+ raise SchemaConflict(
+ msg="Sequence and pattern can't be on the same level in the schema",
+ error_key=u"seq.conflict.pattern",
+ path=path,
+ )
+
+ if self.mapping is not None:
+ raise SchemaConflict(
+ msg="Sequence and mapping can't be on the same level in the schema",
+ error_key=u"seq.conflict.mapping",
+ path=path,
+ )
+ elif self.type == "map":
+ if all(ma not in schema for ma in mapping_aliases) and not self.allowempty_map:
+ raise SchemaConflict(
+ msg="Type is mapping but no mapping alias found on same level",
+ error_key=u"map.no_mapping",
+ path=path,
+ )
+
+ if self.enum is not None:
+ raise SchemaConflict(
+ msg="Mapping and enum can't be on the same level in the schema",
+ error_key=u"map.conflict.enum",
+ path=path,
+ )
+
+ if self.sequence is not None:
+ raise SchemaConflict(
+ msg="Mapping and sequence can't be on the same level in the schema",
+ error_key=u"map.conflict.sequence",
+ path=path,
+ )
+ else:
+ if self.sequence is not None:
+ raise SchemaConflict(
+ msg="Scalar and sequence can't be on the same level in the schema",
+ error_key=u"scalar.conflict.sequence",
+ path=path,
+ )
+
+ if self.mapping is not None:
+ raise SchemaConflict(
+ msg="Scalar and mapping can't be on the same level in the schema",
+ error_key=u"scalar.conflict.mapping",
+ path=path,
+ )
+
+ if self.enum is not None and self.range is not None:
+ raise SchemaConflict(
+ msg="Enum and range can't be on the same level in the schema",
+ error_key=u"enum.conflict.range",
+ path=path,
+ )
diff --git a/pykwalify/types.py b/pykwalify/types.py
new file mode 100644
index 0000000..beb33ad
--- /dev/null
+++ b/pykwalify/types.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+
+""" pyKwalify - types.py """
+
+# python stdlib
+import datetime
+from pykwalify.compat import basestring, bytes
+
+DEFAULT_TYPE = "str"
+
+
+class TextMeta(type):
+ def __instancecheck__(self, instance):
+ return is_text(instance)
+
+
+class text(object):
+ __metaclass__ = TextMeta
+
+
+_types = {
+ "str": str,
+ "int": int,
+ "float": float,
+ "number": None,
+ "bool": bool,
+ "map": dict,
+ "seq": list,
+ "timestamp": datetime.datetime,
+ "date": datetime.date,
+ "symbol": str,
+ "scalar": None,
+ "text": text,
+ "any": object,
+ "enum": str,
+ "none": None
+}
+
+
+sequence_aliases = ["sequence", "seq"]
+mapping_aliases = ["map", "mapping"]
+
+
+def type_class(type):
+ return _types[type]
+
+
+def is_builtin_type(type):
+ return type in _types
+
+
+def is_collection_type(type):
+ return type.lower().strip() == "map" or type.lower().strip() == "seq"
+
+
+def is_scalar_type(type):
+ return not is_collection_type(type)
+
+
+def is_collection(obj):
+ return isinstance(obj, dict) or isinstance(obj, list)
+
+
+def is_scalar(obj):
+ return not is_collection(obj) and obj is not None
+
+
+def is_correct_type(obj, type):
+ return isinstance(obj, type)
+
+
+def is_string(obj):
+ return isinstance(obj, basestring) or isinstance(obj, bytes)
+
+
+def is_int(obj):
+ """
+    True and False are not considered valid integers even though Python treats them as 1 and 0 in some contexts
+ """
+ return isinstance(obj, int) and not isinstance(obj, bool)
+
+
+def is_bool(obj):
+ return isinstance(obj, bool)
+
+
+def is_float(obj):
+ """
+ Valid types are:
+ - objects of float type
+ - Strings that can be converted to float. For example '1e-06'
+ """
+ is_f = isinstance(obj, float)
+ if not is_f:
+ try:
+ float(obj)
+ is_f = True
+ except (ValueError, TypeError):
+ is_f = False
+ return is_f and not is_bool(obj)
+
+
+def is_number(obj):
+ return is_int(obj) or is_float(obj)
+
+
+def is_text(obj):
+ return (is_string(obj) or is_number(obj)) and is_bool(obj) is False
+
+
+def is_any(obj):
+ return True
+
+
+def is_enum(obj):
+ return isinstance(obj, basestring)
+
+
+def is_none(obj):
+ return obj is None
+
+
+def is_sequence_alias(alias):
+ return alias in sequence_aliases
+
+
+def is_mapping_alias(alias):
+ return alias in mapping_aliases
+
+
+def is_timestamp(obj):
+ """
+    YAML has either automatically converted the value to a datetime object,
+    or it is a string that will be validated later.
+ """
+ return isinstance(obj, datetime.datetime) or is_string(obj) or is_int(obj) or is_float(obj)
+
+
+def is_date(obj):
+ """
+ :param obj: Object that is to be validated
+ :return: True/False if obj is valid date object
+ """
+ return isinstance(obj, basestring) or isinstance(obj, datetime.date)
+
+
+tt = {
+ "str": is_string,
+ "int": is_int,
+ "bool": is_bool,
+ "float": is_float,
+ "number": is_number,
+ "text": is_text,
+ "any": is_any,
+ "enum": is_enum,
+ "none": is_none,
+ "timestamp": is_timestamp,
+ "scalar": is_scalar,
+ "date": is_date,
+}
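
The tt table is what _validate_scalar_type in core.py dispatches through; a short illustration using the checkers defined above.

from pykwalify.types import tt

print(tt["int"](42))         # True
print(tt["int"](True))       # False, bools are explicitly excluded from int
print(tt["float"]("1e-06"))  # True, strings convertible to float are accepted
print(tt["timestamp"]("2015-03-29 10:50:00"))  # True, validated in detail later
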
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..992b9b0
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,5 @@
+# test_*.py by default
+# add tests.py and test.py
+[pytest]
+norecursedirs=.tox .git pykwalify.egg-info dist docs examples
+python_files=test*.py
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..f6e3211
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,3 @@
+docopt>=0.6.2
+PyYAML>=3.11
+python-dateutil>=2.4.2
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..5bb0111
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,5 @@
+[metadata]
+license_file = LICENSE
+
+[bdist_wheel]
+universal = 1
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..92db082
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,57 @@
+import os
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+with open('README.md') as f:
+ readme = f.read()
+with open(os.path.join('docs', 'release-notes.rst')) as f:
+ history = f.read()
+
+setup(
+ name="pykwalify",
+ version="1.7.0",
+ description='Python lib/cli for JSON/YAML schema validation',
+ long_description=readme + '\n\n' + history,
+ long_description_content_type='text/markdown',
+ author="Johan Andersson",
+ author_email="Grokzen@gmail.com",
+ maintainer='Johan Andersson',
+ maintainer_email='Grokzen@gmail.com',
+ license='MIT',
+ packages=['pykwalify'],
+ url='http://github.com/grokzen/pykwalify',
+ extras_require={
+ 'ruamel': ["ruamel.yaml>=0.11.0,<0.16.0"],
+ },
+ entry_points={
+ 'console_scripts': [
+ 'pykwalify = pykwalify.cli:cli_entrypoint',
+ ],
+ },
+ install_requires=[
+ 'docopt>=0.6.2',
+ 'PyYAML>=3.11',
+ 'python-dateutil>=2.4.2',
+ ],
+ classifiers=[
+ # 'Development Status :: 1 - Planning',
+ # 'Development Status :: 2 - Pre-Alpha',
+ # 'Development Status :: 3 - Alpha',
+ # 'Development Status :: 4 - Beta',
+ 'Development Status :: 5 - Production/Stable',
+ # 'Development Status :: 6 - Mature',
+ # 'Development Status :: 7 - Inactive',
+ 'Intended Audience :: Developers',
+ 'Operating System :: OS Independent',
+ 'License :: OSI Approved :: MIT License',
+ 'Environment :: Console',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ ],
+)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..3dbf9fa
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+""" pyKwalify validation framework """
diff --git a/tests/files/README.md b/tests/files/README.md
new file mode 100644
index 0000000..3faa375
--- /dev/null
+++ b/tests/files/README.md
@@ -0,0 +1,34 @@
+# Test files
+
+Test files are divided into two types of tests. They follow the naming scheme `(Number)(Type).yaml`, where Number is an ever-increasing integer and Type depends on the test type. Each test type is counted separately.
+
+- Successful tests. Type: 's'
+- Failing tests. Type: 'f'
+
+
+
+# Successful tests
+
+Files in `success` folder.
+
+Each file should contain a top-level dict with the keys `data` and `schema` that hold the test data.
+
+
+
+# Failing tests
+
+Files in `fail` folder.
+
+Each file should contain a top-level dict with the keys `data`, `schema` and `errors` that hold the test data and the expected validation errors.
+
+
+
+# cli tests
+
+Simple schema and data files that are used to test file input via the CLI.
+
+
+
+# partial schemas
+
+Files used to test partial schema support.
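
A fail-type test document as described above can be read back with PyYAML; the inline YAML is a trimmed, made-up example that only shows the expected top-level keys.

import yaml

doc = yaml.safe_load("""
name: fail-example-1
desc: minimal failing test document
schema:
  type: seq
  sequence:
    - type: str
data:
  - foo
  - 1
errors:
  - "Value '1' is not of type 'str'. Path: '/1'"
""")

assert set(doc) >= {"schema", "data", "errors"}
print(doc["errors"][0])
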
diff --git a/tests/files/cli/1a.yaml b/tests/files/cli/1a.yaml
new file mode 100644
index 0000000..c34e2ee
--- /dev/null
+++ b/tests/files/cli/1a.yaml
@@ -0,0 +1,3 @@
+- foo
+- bar
+- baz
diff --git a/tests/files/cli/1b.yaml b/tests/files/cli/1b.yaml
new file mode 100644
index 0000000..41f00aa
--- /dev/null
+++ b/tests/files/cli/1b.yaml
@@ -0,0 +1,3 @@
+type: seq
+sequence:
+ - type: str
diff --git a/tests/files/cli/2a.yaml b/tests/files/cli/2a.yaml
new file mode 100644
index 0000000..a0cd6f3
--- /dev/null
+++ b/tests/files/cli/2a.yaml
@@ -0,0 +1,3 @@
+- 1
+- 2
+- 3
diff --git a/tests/files/cli/2b.yaml b/tests/files/cli/2b.yaml
new file mode 100644
index 0000000..41f00aa
--- /dev/null
+++ b/tests/files/cli/2b.yaml
@@ -0,0 +1,3 @@
+type: seq
+sequence:
+ - type: str
diff --git a/tests/files/fail/test_anchor.yaml b/tests/files/fail/test_anchor.yaml
new file mode 100644
index 0000000..dca97f5
--- /dev/null
+++ b/tests/files/fail/test_anchor.yaml
@@ -0,0 +1,95 @@
+---
+name: fail-anchor-1
+desc: schema with anchor
+schema:
+ type: seq
+ required: true
+ sequence:
+ - type: map
+ required: true
+ mapping:
+ first-name: &name
+ type: str
+ required: true
+ family-name: *name
+data:
+ - first-name: foo
+ last-name: Foo
+ - first-name: bar
+ family-name: 100
+errors:
+ - "Cannot find required key 'family-name'. Path: '/0'"
+ - "Key 'last-name' was not defined. Path: '/0'"
+ - "Value '100' is not of type 'str'. Path: '/1/family-name'"
+ ## Kwalify errors
+ # :required_nokey : 1:3:[/0] key 'family-name:' is required.
+ # :key_undefined : 2:3:[/0/last-name] key 'last-name:' is undefined.
+ # :type_unmatch : 4:3:[/1/family-name] '100': not a string.
+---
+name: fail-anchor-2
+desc: schema with anchor 2
+schema:
+ type: map
+ required: true
+ mapping:
+ title: &name
+ type: str
+ required: true
+ address-book:
+ type: seq
+ required: true
+ sequence:
+ - type: map
+ mapping:
+ name: *name
+ email:
+ type: str
+ required: true
+data:
+ title: my friends
+ address-book:
+ - name: 100
+ email: foo@mail.com
+ - first-name: bar
+ email: bar@mail.com
+errors:
+ - "Cannot find required key 'name'. Path: '/address-book/1'"
+ - "Key 'first-name' was not defined. Path: '/address-book/1'"
+ - "Value '100' is not of type 'str'. Path: '/address-book/0/name'"
+ ## Kwalify errors
+ # :type_unmatch : 3:5:[/address-book/0/name] '100': not a string.
+ # :required_nokey : 5:5:[/address-book/1] key 'name:' is required.
+ # :key_undefined : 5:5:[/address-book/1/first-name] key 'first-name:' is undefined.
+# TODO: THIS TEST IS BROKEN BECAUSE IT CAUSES INFINITE RECURSION IN PYTHON
+# ---
+# name: fail-anchor-3
+# desc: document with anchor
+# schema:
+# type: seq
+# sequence:
+# - &employee
+# type: map
+# mapping:
+# name:
+# type: str
+# post:
+# type: str
+# enum:
+# - exective
+# - manager
+# - clerk
+# supervisor: *employee
+# data:
+# - &foo
+# name: 100
+# post: exective
+# supervisor: *foo
+# - &bar
+# name: foo
+# post: worker
+# supervisor: *foo
+# errors:
+# - ''
+# ## Kwalify errors
+# # :type_unmatch : 2:3:[/0/name] '100': not a string.
+# # :enum_notexist : 7:3:[/1/post] 'worker': invalid post value.
diff --git a/tests/files/fail/test_assert.yaml b/tests/files/fail/test_assert.yaml
new file mode 100644
index 0000000..9073cdf
--- /dev/null
+++ b/tests/files/fail/test_assert.yaml
@@ -0,0 +1,34 @@
+---
+name: fail-assert-1
+desc: assert test
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "less-than":
+ type: number
+ assert: val < 8
+ "more-than":
+ type: number
+ assert: 3 < val
+ "between":
+ type: number
+ assert: 3 < val and val < 8
+ "except":
+ type: number
+ assert: val < 3 or 8 < val
+data:
+ - less-than: 8
+ - more-than: 3
+ - between: 2.9
+ - except: 3.1
+errors:
+ - "Value: '2.9' assertion expression failed (3 < val and val < 8)"
+ - "Value: '3' assertion expression failed (3 < val)"
+ - "Value: '3.1' assertion expression failed (val < 3 or 8 < val)"
+ - "Value: '8' assertion expression failed (val < 8)"
+ # :assert_failed : 1:3:[/0/less-than] '8': assertion expression failed (val < 8).
+ # :assert_failed : 2:3:[/1/more-than] '3': assertion expression failed (3 < val).
+ # :assert_failed : 3:3:[/2/between] '2.9': assertion expression failed (3 < val and val < 8).
+ # :assert_failed : 4:3:[/3/except] '3.1': assertion expression failed (val < 3 or 8 < val).
diff --git a/tests/files/fail/test_default.yaml b/tests/files/fail/test_default.yaml
new file mode 100644
index 0000000..ab0a0a5
--- /dev/null
+++ b/tests/files/fail/test_default.yaml
@@ -0,0 +1,21 @@
+---
+name: fail-default-1
+desc: default value of map
+schema:
+ type: map
+ mapping:
+ =:
+ type: number
+ range:
+ min: -10
+ max: 10
+data:
+ value1: 0
+ value2: 20
+ value3: -20
+errors:
+ - "Type 'scalar' has size of '-20', less than min limit '-10'. Path: '/value3'"
+ - "Type 'scalar' has size of '20', greater than max limit '10'. Path: '/value2'"
+ ## Kwalify errors
+ # :range_toolarge : 2:1:[/value2] '20': too large (> max 10).
+ # :range_toosmall : 3:1:[/value3] '-20': too small (< min -10).
diff --git a/tests/files/fail/test_desc.yaml b/tests/files/fail/test_desc.yaml
new file mode 100644
index 0000000..401dde3
--- /dev/null
+++ b/tests/files/fail/test_desc.yaml
@@ -0,0 +1 @@
+# Because desc has no validation done on its value, there is no failure case for this keyword
diff --git a/tests/files/fail/test_enum.yaml b/tests/files/fail/test_enum.yaml
new file mode 100644
index 0000000..9f619dc
--- /dev/null
+++ b/tests/files/fail/test_enum.yaml
@@ -0,0 +1,16 @@
+---
+name: fail-enum-1
+desc: Test simple enum
+data:
+ - A
+ - B
+ - O
+schema:
+ type: seq
+ sequence:
+ - type: str
+ enum: [E, F, G, H]
+errors:
+ - "Enum 'A' does not exist. Path: '/0'"
+ - "Enum 'B' does not exist. Path: '/1'"
+ - "Enum 'O' does not exist. Path: '/2'"
diff --git a/tests/files/fail/test_example.yaml b/tests/files/fail/test_example.yaml
new file mode 100644
index 0000000..baa8caf
--- /dev/null
+++ b/tests/files/fail/test_example.yaml
@@ -0,0 +1 @@
+# Because example has no validation done on its value, there is no failure case for this keyword
\ No newline at end of file
diff --git a/tests/files/fail/test_extensions.yaml b/tests/files/fail/test_extensions.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/fail/test_extensions.yaml
diff --git a/tests/files/fail/test_func.yaml b/tests/files/fail/test_func.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/fail/test_func.yaml
diff --git a/tests/files/fail/test_ident.yaml b/tests/files/fail/test_ident.yaml
new file mode 100644
index 0000000..2531dfe
--- /dev/null
+++ b/tests/files/fail/test_ident.yaml
@@ -0,0 +1,23 @@
+---
+name: fail-ident-1
+desc: ident constraint test
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "name":
+ ident: true
+ "age":
+ type: int
+data:
+ - name: foo
+ age: 10
+ - name: bar
+ age: 10
+ - name: bar
+ age: 10
+errors:
+ - "Value 'bar' is not unique. Previous path: '/1/name'. Path: '/2/name'"
+ ## Kwalify errors
+ # :value_notunique : 5:3:[/2/name] 'bar': is already used at '/1/name'.
diff --git a/tests/files/fail/test_include.yaml b/tests/files/fail/test_include.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/fail/test_include.yaml
diff --git a/tests/files/fail/test_length.yaml b/tests/files/fail/test_length.yaml
new file mode 100644
index 0000000..8264987
--- /dev/null
+++ b/tests/files/fail/test_length.yaml
@@ -0,0 +1,113 @@
+---
+name: fail-length-1
+desc: length test
+schema:
+ type: map
+ mapping:
+ "max-only":
+ type: seq
+ sequence:
+ - type: str
+ length: {max: 8}
+ "min-only":
+ type: seq
+ sequence:
+ - type: str
+ length: {min: 4}
+ "max-and-min":
+ type: seq
+ sequence:
+ - type: str
+ length: {max: 8, min: 4}
+data:
+ max-only:
+ - hogehoge!
+ min-only:
+ - foo
+ -
+ max-and-min:
+ - foobarbaz
+ - foo
+errors:
+ - "Value: 'foo' has length of '3', greater than min limit '4'. Path: '/max-and-min/1'"
+ - "Value: 'foo' has length of '3', greater than min limit '4'. Path: '/min-only/0'"
+ - "Value: 'foobarbaz' has length of '9', greater than max limit '8'. Path: '/max-and-min/0'"
+ - "Value: 'hogehoge!' has length of '9', greater than max limit '8'. Path: '/max-only/0'"
+ ## Kwalify errors
+ # :length_toolong : 2:3:[/max-only/0] 'hogehoge!': too long (length 9 > max 8).
+ # :length_tooshort : 4:3:[/min-only/0] 'foo': too short (length 3 < min 4).
+ # :length_toolong : 7:3:[/max-and-min/0] 'foobarbaz': too long (length 9 > max 8).
+ # :length_tooshort : 8:3:[/max-and-min/1] 'foo': too short (length 3 < min 4).
+---
+name: fail-length-2
+desc: length test (with max-ex and min-ex)
+schema:
+ type: map
+ mapping:
+ "max-ex-only":
+ type: seq
+ sequence:
+ - type: str
+ length: {max-ex: 8}
+ "min-ex-only":
+ type: seq
+ sequence:
+ - type: str
+ length: {min-ex: 4}
+ "max-ex-and-min-ex":
+ type: seq
+ sequence:
+ - type: str
+ length: {max-ex: 8, min-ex: 4}
+data:
+ max-ex-only:
+ - hogehoge
+ min-ex-only:
+ - foo!
+ -
+ max-ex-and-min-ex:
+ - foobarba
+ - foo!
+errors:
+ - "Value: 'foo!' has length of '4', greater than min_ex limit '4'. Path: '/max-ex-and-min-ex/1'"
+ - "Value: 'foo!' has length of '4', greater than min_ex limit '4'. Path: '/min-ex-only/0'"
+ - "Value: 'foobarba' has length of '8', greater than max_ex limit '8'. Path: '/max-ex-and-min-ex/0'"
+ - "Value: 'hogehoge' has length of '8', greater than max_ex limit '8'. Path: '/max-ex-only/0'"
+ ## Kwalify errors
+ # :length_toolongex : 2:3:[/max-ex-only/0] 'hogehoge': too long (length 8 >= max 8).
+ # :length_tooshortex : 4:3:[/min-ex-only/0] 'foo!': too short (length 4 <= min 4).
+ # :length_toolongex : 7:3:[/max-ex-and-min-ex/0] 'foobarba': too long (length 8 >= max 8).
+ # :length_tooshortex : 8:3:[/max-ex-and-min-ex/1] 'foo!': too short (length 4 <= min 4).
+---
+name: fail-length-3
+desc: length test (with min, max, max-ex and min-ex)
+schema:
+ type: map
+ mapping:
+ "A":
+ type: seq
+ sequence:
+ - type: str
+ length: {max: 8, min-ex: 4}
+ "B":
+ type: seq
+ sequence:
+ - type: str
+ length: {max-ex: 8, min: 4}
+data:
+ A:
+ - hogehoge!
+ - hoge
+ B:
+ - hogehoge
+ - hog
+errors:
+ - "Value: 'hog' has length of '3', greater than min limit '4'. Path: '/B/1'"
+ - "Value: 'hoge' has length of '4', greater than min_ex limit '4'. Path: '/A/1'"
+ - "Value: 'hogehoge!' has length of '9', greater than max limit '8'. Path: '/A/0'"
+ - "Value: 'hogehoge' has length of '8', greater than max_ex limit '8'. Path: '/B/0'"
+ ## Kwalify errors
+ # :length_toolong : 2:3:[/A/0] 'hogehoge!': too long (length 9 > max 8).
+ # :length_tooshortex : 3:3:[/A/1] 'hoge': too short (length 4 <= min 4).
+ # :length_toolongex : 5:3:[/B/0] 'hogehoge': too long (length 8 >= max 8).
+ # :length_tooshort : 6:3:[/B/1] 'hog': too short (length 3 < min 4).
diff --git a/tests/files/fail/test_mapping.yaml b/tests/files/fail/test_mapping.yaml
new file mode 100644
index 0000000..0561ea4
--- /dev/null
+++ b/tests/files/fail/test_mapping.yaml
@@ -0,0 +1,186 @@
+---
+name: fail-mapping-1
+desc: This tests that type checking works when a value in the map is None
+data:
+ streams:
+ - name: ~
+ sampleRateMultiple: 1
+ - name: media
+ sampleRateMultiple: 2
+schema:
+ type: map
+ mapping:
+ streams:
+ type: seq
+ required: True
+ sequence:
+ - type: map
+ mapping:
+ name:
+ type: str
+ range:
+ min: 1
+ required: True
+ sampleRateMultiple:
+ type: int
+ required: True
+errors:
+ - "required.novalue : '/streams/0/name'"
+---
+name: fail-mapping-2
+desc: Test keyword regex using default matching-rule 'any'
+data:
+ foobar1: 1
+ foobar2: 2
+ foobar3: 3
+schema:
+ type: map
+ mapping:
+ regex;(^foobar[1-2]$):
+ type: int
+errors:
+ - "Key 'foobar3' does not match any regex '^foobar[1-2]$'. Path: ''"
+---
+name: fail-mapping-3
+desc: Test keyword regex using declared matching-rule 'any'
+data:
+ foobar1: 1
+ foobar2: 2
+ bar3: 3
+schema:
+ type: map
+ matching-rule: 'any'
+ mapping:
+ regex;(^foobar):
+ type: int
+ regex;([1-2]$):
+ type: int
+errors:
+ - "Key 'bar3' does not match any regex '[1-2]$' or '^foobar'. Path: ''"
+---
+name: fail-mapping-4
+desc: Test keyword regex using declared matching-rule 'all'
+data:
+ foobar1: 1
+ foobar2: 2
+ foobar3: 3
+schema:
+ type: map
+ matching-rule: 'all'
+ mapping:
+ regex;(^foobar.*$):
+ type: int
+ regex;(^.*[1-2]$):
+ type: int
+errors:
+ - "Key 'foobar3' does not match all regex '^.*[1-2]$' and '^foobar.*$'. Path: ''"
+---
+name: fail-mapping-5
+desc: Test that a sequence of mappings checks the correct type and raises the correct error when a value is not a dict
+data:
+ - foo: whatever
+ - "sgdf"
+ - 2
+ - ~
+schema:
+ type: seq
+ required: True
+ matching: all
+ seq:
+ - type: map
+ required: True
+ map:
+ foo:
+ type: str
+errors:
+ - "Value '2' is not a dict. Value path: '/2'"
+ - "Value 'sgdf' is not a dict. Value path: '/1'"
+ - "required.novalue : '/3'"
+---
+name: fail-mapping-6
+desc: Test that type checking of mapping is done even if the mapping keyword is not specified in the schema
+data:
+ - not
+ - a
+ - map
+schema:
+ type: map
+ allowempty: True
+errors:
+ - "Value '['not', 'a', 'map']' is not a dict. Value path: ''"
+---
+name: fail-mapping-7
+desc: Test that default mode fails in a similar way to regular mode and that a key that is not defined uses the default rule when '=' is set
+data:
+ OWNERSHIP: abc
+ WHT: def
+schema:
+ type: map
+ mapping:
+ WHT:
+ type: int
+ =:
+ type: int
+errors:
+ - "Value 'abc' is not of type 'int'. Path: '/OWNERSHIP'"
+ - "Value 'def' is not of type 'int'. Path: '/WHT'"
+---
+name: fail-mapping-8
+desc: mapping test
+schema:
+ type: map
+ required: true
+ mapping:
+ name:
+ type: str
+ required: true
+ email:
+ type: str
+ # This pattern value was modified from /@/ to .+@.+ to make it compatible with python
+ pattern: .+@.+
+ required: True
+ age:
+ type: int
+ blood:
+ type: str
+ enum:
+ - A
+ - B
+ - O
+ - AB
+ birth:
+ type: date
+data:
+ nam: foo
+ email: foo(at)mail.com
+ age: twenty
+ blood: ab
+ birth: Jul 01, 1985
+errors:
+ - "Cannot find required key 'name'. Path: ''"
+ - "Enum 'ab' does not exist. Path: '/blood'"
+ - "Key 'nam' was not defined. Path: ''"
+ - "Value 'foo(at)mail.com' does not match pattern '.+@.+'. Path: '/email'"
+ - "Value 'twenty' is not of type 'int'. Path: '/age'"
+ ## Kwalify errors
+ # :required_nokey : 1:1:[/] key 'name:' is required.
+ # :key_undefined : 1:1:[/nam] key 'nam:' is undefined.
+ # :pattern_unmatch : 2:1:[/email] 'foo(at)mail.com': not matched to pattern /@/.
+ # :type_unmatch : 3:1:[/age] 'twenty': not a integer.
+ # :enum_notexist : 4:1:[/blood] 'ab': invalid blood value.
+ # :type_unmatch : 5:1:[/birth] 'Jul 01, 1985': not a date.
+---
+name: fail-mapping-9
+desc: Test that regexes can be 'required'
+data:
+ hello: Hi
+ person: Fred
+schema:
+ type: map
+ mapping:
+ regex;(person[1-9]):
+ required: True
+errors:
+ - "Cannot find required key 'regex;(person[1-9])'. Path: ''"
+ - "Key 'hello' does not match any regex 'person[1-9]'. Path: ''"
+ - "Key 'person' does not match any regex 'person[1-9]'. Path: ''"
\ No newline at end of file
diff --git a/tests/files/fail/test_matching.yaml b/tests/files/fail/test_matching.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/fail/test_matching.yaml
diff --git a/tests/files/fail/test_merge.yaml b/tests/files/fail/test_merge.yaml
new file mode 100644
index 0000000..3b7eac8
--- /dev/null
+++ b/tests/files/fail/test_merge.yaml
@@ -0,0 +1,37 @@
+---
+name: fail-merge-1
+desc: merge maps
+schema:
+ type: map
+ mapping:
+ "group":
+ type: map
+ mapping:
+ "name": &name
+ type: str
+ required: true
+ "email": &email
+ type: str
+ pattern: .+@.+
+ required: False
+ "user":
+ type: map
+ mapping:
+ "name":
+ <<: *name # merge
+ length: {max: 16} # add
+ "email":
+ <<: *email # merge
+ required: true # override
+data:
+ group:
+ name: foo
+ email: foo@mail.com
+ user:
+ name: toooooo-looooong-naaaame
+errors:
+ - "Cannot find required key 'email'. Path: '/user'"
+ - "Value: 'toooooo-looooong-naaaame' has length of '24', greater than max limit '16'. Path: '/user/name'"
+ ## Kwalify errors
+ # :required_nokey : 5:3:[/user] key 'email:' is required.
+ # :length_toolong : 5:3:[/user/name] 'toooooo-looooong-naaaame': too long (length 24 > max 16).
diff --git a/tests/files/fail/test_name.yaml b/tests/files/fail/test_name.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/fail/test_name.yaml
diff --git a/tests/files/fail/test_nullable.yaml b/tests/files/fail/test_nullable.yaml
new file mode 100644
index 0000000..8ce8259
--- /dev/null
+++ b/tests/files/fail/test_nullable.yaml
@@ -0,0 +1,19 @@
+---
+name: fail-nullable-1
+desc:
+data:
+ - name:
+ email: foo@mail.com
+ - email: bar@mail.net
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ name:
+ type: str
+ nullable: False
+ email:
+ type: str
+errors:
+ - "nullable.novalue : '/0/name'"
diff --git a/tests/files/fail/test_pattern.yaml b/tests/files/fail/test_pattern.yaml
new file mode 100644
index 0000000..4531d55
--- /dev/null
+++ b/tests/files/fail/test_pattern.yaml
@@ -0,0 +1,26 @@
+---
+name: fail-pattern-1
+desc:
+data:
+ email: foo(at)mail.com
+schema:
+ type: map
+ mapping:
+ email:
+ type: str
+ pattern: .+@.+
+errors:
+ - "Value 'foo(at)mail.com' does not match pattern '.+@.+'. Path: '/email'"
+---
+name: fail-pattern-2
+desc:
+data:
+ d: 'a'
+schema:
+ type: map
+ mapping:
+ d:
+ type: str
+ pattern: '[0-9]+'
+errors:
+ - "Value 'a' does not match pattern '[0-9]+'. Path: '/d'"
diff --git a/tests/files/fail/test_range.yaml b/tests/files/fail/test_range.yaml
new file mode 100644
index 0000000..22bd801
--- /dev/null
+++ b/tests/files/fail/test_range.yaml
@@ -0,0 +1,219 @@
+---
+name: fail-range-1
+desc:
+data:
+ - foo
+ - bar
+ - foobar
+schema:
+ type: seq
+ sequence:
+ - type: str
+ range:
+ max: 5
+ min: 1
+errors:
+ - "Type 'scalar' has size of '6', greater than max limit '5'. Path: '/2'"
+---
+name: fail-range-2
+desc: Test that range validation on 'map' raises the correct error
+data:
+ streams:
+ sampleRateMultiple: 1
+schema:
+ type: map
+ mapping:
+ streams:
+ type: map
+ range:
+ min: 2
+ max: 3
+ mapping:
+ sampleRateMultiple:
+ type: int
+ required: True
+errors:
+ - "Type 'map' has size of '1', less than min limit '2'. Path: '/streams'"
+---
+name: fail-range-3
+desc: Test that range validation on 'seq' raises the correct error
+data:
+ - foobar
+ - barfoo
+ - opa
+schema:
+ type: seq
+ range:
+ min: 1
+ max: 2
+ sequence:
+ - type: str
+errors:
+ - "Type 'seq' has size of '3', greater than max limit '2'. Path: ''"
+---
+name: fail-range-4
+desc: Test float range value out of range
+data:
+ the_float: 1.2
+ the_float_ex: 2.1
+schema:
+ type: map
+ mapping:
+ the_float:
+ type: float
+ required: True
+ range:
+ min: 2.1
+ max: 3.2
+ the_float_ex:
+ type: float
+ required: True
+ range:
+ min-ex: 2.1
+ max-ex: 3.2
+errors:
+ - "Type 'scalar' has size of '1.2', less than min limit '2.1'. Path: '/the_float'"
+ - "Type 'scalar' has size of '2.1', less than or equals to min limit(exclusive) '2.1'. Path: '/the_float_ex'"
+---
+name: fail-range-5
+desc: range test && bug#?????
+schema:
+ type: map
+ mapping:
+ "max-only":
+ type: seq
+ sequence:
+ - type: number
+ required: true
+ range: {max: 100}
+ "min-only":
+ type: seq
+ sequence:
+ - type: number
+ required: true
+ range: {min: 10.0}
+ "max-and-min":
+ type: seq
+ sequence:
+ - type: number
+ required: true
+ range: {max: 100.0, min: 10.0}
+data:
+ max-only:
+ - 101
+ - 100.1
+ min-only:
+ - 9
+ - 9.99
+ max-and-min:
+ - 101
+ - 100.1
+ - 9
+ - 9.99
+errors:
+ - "Type 'scalar' has size of '100.1', greater than max limit '100'. Path: '/max-only/1'"
+ - "Type 'scalar' has size of '100.1', greater than max limit '100.0'. Path: '/max-and-min/1'"
+ - "Type 'scalar' has size of '101', greater than max limit '100'. Path: '/max-only/0'"
+ - "Type 'scalar' has size of '101', greater than max limit '100.0'. Path: '/max-and-min/0'"
+ - "Type 'scalar' has size of '9', less than min limit '10.0'. Path: '/max-and-min/2'"
+ - "Type 'scalar' has size of '9', less than min limit '10.0'. Path: '/min-only/0'"
+ - "Type 'scalar' has size of '9.99', less than min limit '10.0'. Path: '/max-and-min/3'"
+ - "Type 'scalar' has size of '9.99', less than min limit '10.0'. Path: '/min-only/1'"
+ ## Kwalify errors
+ # :range_toolarge : 2:3:[/max-only/0] '101': too large (> max 100).
+ # :range_toolarge : 3:3:[/max-only/1] '100.1': too large (> max 100).
+ # :range_toosmall : 5:3:[/min-only/0] '9': too small (< min 10.0).
+ # :range_toosmall : 6:3:[/min-only/1] '9.99': too small (< min 10.0).
+ # :range_toolarge : 8:3:[/max-and-min/0] '101': too large (> max 100.0).
+ # :range_toolarge : 9:3:[/max-and-min/1] '100.1': too large (> max 100.0).
+ # :range_toosmall : 10:3:[/max-and-min/2] '9': too small (< min 10.0).
+ # :range_toosmall : 11:3:[/max-and-min/3] '9.99': too small (< min 10.0).
+---
+name: fail-range-6
+desc: range test (with max-ex and min-ex)
+schema:
+ type: map
+ mapping:
+ "max-ex-only":
+ type: seq
+ sequence:
+ - type: number
+ required: true
+ range: {max-ex: 100}
+ "min-ex-only":
+ type: seq
+ sequence:
+ - type: number
+ required: true
+ range: {min-ex: 10.0}
+ "max-ex-and-min-ex":
+ type: seq
+ sequence:
+ - type: number
+ required: true
+ range: {max-ex: 100.0, min-ex: 10.0}
+data:
+ max-ex-only:
+ - 100
+ - 100.0
+ min-ex-only:
+ - 10
+ - 10.0
+ max-ex-and-min-ex:
+ - 100
+ - 100.0
+ - 10
+ - 10.0
+errors:
+ - "Type 'scalar' has size of '10', less than or equals to min limit(exclusive) '10.0'. Path: '/max-ex-and-min-ex/2'"
+ - "Type 'scalar' has size of '10', less than or equals to min limit(exclusive) '10.0'. Path: '/min-ex-only/0'"
+ - "Type 'scalar' has size of '10.0', less than or equals to min limit(exclusive) '10.0'. Path: '/max-ex-and-min-ex/3'"
+ - "Type 'scalar' has size of '10.0', less than or equals to min limit(exclusive) '10.0'. Path: '/min-ex-only/1'"
+ - "Type 'scalar' has size of '100', greater than or equals to max limit(exclusive) '100'. Path: '/max-ex-only/0'"
+ - "Type 'scalar' has size of '100', greater than or equals to max limit(exclusive) '100.0'. Path: '/max-ex-and-min-ex/0'"
+ - "Type 'scalar' has size of '100.0', greater than or equals to max limit(exclusive) '100'. Path: '/max-ex-only/1'"
+ - "Type 'scalar' has size of '100.0', greater than or equals to max limit(exclusive) '100.0'. Path: '/max-ex-and-min-ex/1'"
+ ## Kwalify errors
+ # :range_toolargeex : 2:3:[/max-ex-only/0] '100': too large (>= max 100).
+ # :range_toolargeex : 3:3:[/max-ex-only/1] '100.0': too large (>= max 100).
+ # :range_toosmallex : 5:3:[/min-ex-only/0] '10': too small (<= min 10.0).
+ # :range_toosmallex : 6:3:[/min-ex-only/1] '10.0': too small (<= min 10.0).
+ # :range_toolargeex : 8:3:[/max-ex-and-min-ex/0] '100': too large (>= max 100.0).
+ # :range_toolargeex : 9:3:[/max-ex-and-min-ex/1] '100.0': too large (>= max 100.0).
+ # :range_toosmallex : 10:3:[/max-ex-and-min-ex/2] '10': too small (<= min 10.0).
+ # :range_toosmallex : 11:3:[/max-ex-and-min-ex/3] '10.0': too small (<= min 10.0).
+---
+name: fail-range-7
+desc: range test (with max, min, max-ex and min-ex)
+schema:
+ type: map
+ mapping:
+ "A":
+ type: seq
+ sequence:
+ - type: number
+ required: true
+ range: {max: 100, min-ex: 10.0}
+ "B":
+ type: seq
+ sequence:
+ - type: number
+ required: true
+ range: {min: 10, max-ex: 100.0}
+data:
+ A:
+ - 100.00001
+ - 10.0
+ B:
+ - 9.99999
+ - 100.0
+errors:
+ - "Type 'scalar' has size of '10.0', less than or equals to min limit(exclusive) '10.0'. Path: '/A/1'"
+ - "Type 'scalar' has size of '100.0', greater than or equals to max limit(exclusive) '100.0'. Path: '/B/1'"
+ - "Type 'scalar' has size of '100.00001', greater than max limit '100'. Path: '/A/0'"
+ - "Type 'scalar' has size of '9.99999', less than min limit '10'. Path: '/B/0'"
+ ## Kwalify errors
+ # :range_toolarge : 2:3:[/A/0] '100.00001': too large (> max 100)"
+ # :range_toosmallex : 3:3:[/A/1] '10.0': too small (<= min 10.0).
+ # :range_toosmall : 5:3:[/B/0] '9.99999': too small (< min 10).
+ # :range_toolargeex : 6:3:[/B/1] '100.0': too large (>= max 100.0).
diff --git a/tests/files/fail/test_required.yaml b/tests/files/fail/test_required.yaml
new file mode 100644
index 0000000..6038544
--- /dev/null
+++ b/tests/files/fail/test_required.yaml
@@ -0,0 +1,19 @@
+---
+name: fail-required-1
+desc:
+data:
+ - name: foo
+ email: foo@mail.com
+ - email: bar@mail.net
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ name:
+ type: str
+ required: True
+ email:
+ type: str
+errors:
+ - "Cannot find required key 'name'. Path: '/1'"
diff --git a/tests/files/fail/test_schema.yaml b/tests/files/fail/test_schema.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/fail/test_schema.yaml
diff --git a/tests/files/fail/test_sequence.yaml b/tests/files/fail/test_sequence.yaml
new file mode 100644
index 0000000..e26c4a2
--- /dev/null
+++ b/tests/files/fail/test_sequence.yaml
@@ -0,0 +1,71 @@
+---
+name: fail-sequence-1
+desc:
+data:
+ - 1
+ - 2
+ - 3
+ - True
+ - False
+schema:
+ type: seq
+ sequence:
+ - type: str
+errors:
+ - "Value '1' is not of type 'str'. Path: '/0'"
+ - "Value '2' is not of type 'str'. Path: '/1'"
+ - "Value '3' is not of type 'str'. Path: '/2'"
+ - "Value 'True' is not of type 'str'. Path: '/3'"
+ - "Value 'False' is not of type 'str'. Path: '/4'"
+---
+name: fail-sequence-2
+desc:
+data:
+ - True
+ - False
+ - 1
+schema:
+ type: seq
+ sequence:
+ - type: bool
+errors:
+ - "Value '1' is not of type 'bool'. Path: '/2'"
+---
+name: fail-sequence-3
+desc: sequence test
+schema:
+ type: seq
+ required: true
+ sequence:
+ - type: str
+ required: true
+data:
+ - foo
+ - bar
+ -
+ - baz
+ - 100
+errors:
+ - "Value '100' is not of type 'str'. Path: '/4'"
+ - "required.novalue : '/2'"
+ ## Kwalify errors
+ # - "Value 'None' is not of type 'str'. Path: '/2'"
+ # :required_novalue : (line 3)[/2] value required but none.
+ # :type_unmatch : (line 5)[/4] '100': not a string.
+---
+name: fail-sequence-4
+desc: Test that very deep nested sequences fail when the schema expects a sequence but the value is something else
+schema:
+ type: seq
+ sequence:
+ - type: seq
+ sequence:
+ - type: seq
+ sequence:
+ - type: seq
+ sequence:
+ - type: str
+data:
+ - - - 1
+errors:
+ - "Value '1' is not a list. Value path: '/0/0/0'"
diff --git a/tests/files/fail/test_sequence_multi.yaml b/tests/files/fail/test_sequence_multi.yaml
new file mode 100644
index 0000000..a5007a0
--- /dev/null
+++ b/tests/files/fail/test_sequence_multi.yaml
@@ -0,0 +1,31 @@
+---
+name: fail-sequence-multi-1
+desc: Test multiple sequence values with wrong sub type and 'all' matching rule
+data:
+ - "foo"
+schema:
+ type: seq
+ matching: "all"
+ seq:
+ - type: str
+ - type: int
+errors:
+ - "Value 'foo' is not of type 'int'. Path: '/0'"
+---
+name: fail-sequence-multi-2
+desc: Test multiple nested sequence values with error in level 2 with 'any' matching rule
+data:
+ - - 123
+ - "foobar"
+schema:
+ type: seq
+ matching: "any"
+ seq:
+ - type: str
+ - type: seq
+ matching: "any"
+ sequence:
+ - type: str
+errors:
+ - "Value '123' is not of type 'str'. Path: '/0/0'"
+ - "Value '[123]' is not of type 'str'. Path: '/0'"
diff --git a/tests/files/fail/test_type_any.yaml b/tests/files/fail/test_type_any.yaml
new file mode 100644
index 0000000..5c8e7fa
--- /dev/null
+++ b/tests/files/fail/test_type_any.yaml
@@ -0,0 +1 @@
+# Becuase type 'any' validates for any kind of data there is no failure case for this type.
diff --git a/tests/files/fail/test_type_bool.yaml b/tests/files/fail/test_type_bool.yaml
new file mode 100644
index 0000000..1007348
--- /dev/null
+++ b/tests/files/fail/test_type_bool.yaml
@@ -0,0 +1,52 @@
+---
+name: fail-type-bool-1
+desc: Test wrong type as value in list
+data:
+ - "foo"
+schema:
+ type: seq
+ matching: "any"
+ seq:
+ - type: bool
+errors:
+ - "Value 'foo' is not of type 'bool'. Path: '/0'"
+---
+name: fail-type-bool-2
+desc: Test bool value inside list
+data:
+ - 'abc'
+ - 123
+schema:
+ type: seq
+ sequence:
+ - type: bool
+errors:
+ - "Value '123' is not of type 'bool'. Path: '/1'"
+ - "Value 'abc' is not of type 'bool'. Path: '/0'"
+---
+name: fail-type-bool-3
+desc: Test bool value in mapping
+data:
+ foo: 'abc'
+schema:
+ type: map
+ mapping:
+ foo:
+ type: bool
+errors:
+ - "Value 'abc' is not of type 'bool'. Path: '/foo'"
+---
+name: fail-type-bool-4
+desc: Test bool inside nested map & seq
+data:
+ foo:
+ - 'abc'
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: bool
+errors:
+ - "Value 'abc' is not of type 'bool'. Path: '/foo/0'"
diff --git a/tests/files/fail/test_type_date.yaml b/tests/files/fail/test_type_date.yaml
new file mode 100644
index 0000000..5a1b39a
--- /dev/null
+++ b/tests/files/fail/test_type_date.yaml
@@ -0,0 +1,83 @@
+---
+name: fail-type-date-1
+desc: basic test for date type with default formats
+data: "abc"
+schema:
+ type: date
+errors:
+ - "Not a valid date: abc Path: ''"
+---
+name: fail-type-date-2
+desc: Basic test for date type with defined date-formats
+data: "31-01-2017"
+schema:
+ type: date
+ format: "%Y-%m-%d"
+errors:
+ - "Not a valid date: 31-01-2017 format: %Y-%m-%d. Path: ''"
+---
+name: fail-type-date-3
+desc: Basic test for date type with defined date-formats
+data:
+ - "2017"
+ - "31"
+schema:
+ type: seq
+ sequence:
+ - type: date
+ format:
+ - "%d-%m-%Y"
+ - "%Y-%m-%d"
+errors:
+ - "Not a valid date: 2017 format: %Y-%m-%d. Path: '/0'"
+ - "Not a valid date: 31 format: %Y-%m-%d. Path: '/1'"
+---
+name: fail-type-date-4
+desc: Test date type as values in a list
+data:
+ - 'abc-1997'
+ - 'abc-1997-07'
+ - 'abc-1997-07-16'
+ - 'abc-1997-07-16T19:20+01:00'
+ - 'abc-1997-07-16T19:20:30+01:00'
+ - 'abc-1997-07-16T19:20:30.45+01:00'
+schema:
+ type: seq
+ sequence:
+ - type: date
+errors:
+ - "Not a valid date: abc-1997 Path: '/0'"
+ - "Not a valid date: abc-1997-07 Path: '/1'"
+ - "Not a valid date: abc-1997-07-16 Path: '/2'"
+ - "Not a valid date: abc-1997-07-16T19:20+01:00 Path: '/3'"
+ - "Not a valid date: abc-1997-07-16T19:20:30+01:00 Path: '/4'"
+ - "Not a valid date: abc-1997-07-16T19:20:30.45+01:00 Path: '/5'"
+---
+name: fail-type-date-5
+desc: Test that wrong value types do not validate
+data:
+ - 123
+ - True
+schema:
+ type: seq
+ sequence:
+ - type: date
+errors:
+ - "Value '123' is not of type 'date'. Path: '/0'"
+ - "Value 'True' is not of type 'date'. Path: '/1'"
+---
+name: fail-type-date-6
+desc: Test that wrong value types in map do not validate
+data:
+ foo: 123
+ bar: True
+schema:
+ type: map
+ mapping:
+ foo:
+ type: date
+ bar:
+ type: date
+errors:
+ - "Value '123' is not of type 'date'. Path: '/foo'"
+ - "Value 'True' is not of type 'date'. Path: '/bar'"
diff --git a/tests/files/fail/test_type_float.yaml b/tests/files/fail/test_type_float.yaml
new file mode 100644
index 0000000..bef14b8
--- /dev/null
+++ b/tests/files/fail/test_type_float.yaml
@@ -0,0 +1,56 @@
+---
+name: fail-type-float-1
+desc: Test simple float value
+data: "abc"
+schema:
+ type: float
+errors:
+ - "Value 'abc' is not of type 'float'. Path: ''"
+---
+name: fail-type-float-2
+desc: Test wrong type as value in list
+data:
+ - "foo"
+ - True
+schema:
+ type: seq
+ seq:
+ - type: float
+errors:
+ - "Value 'True' is not of type 'float'. Path: '/1'"
+ - "Value 'foo' is not of type 'float'. Path: '/0'"
+---
+name: fail-type-float-3
+desc: Test float value in mapping
+data:
+ foo: "abc"
+ bar: True
+schema:
+ type: map
+ mapping:
+ foo:
+ type: float
+ bar:
+ type: float
+errors:
+ - "Value 'True' is not of type 'float'. Path: '/bar'"
+ - "Value 'abc' is not of type 'float'. Path: '/foo'"
+---
+name: fail-type-float-4
+desc: Test float inside nested map & seq
+data:
+ foo:
+ - True
+ - "abc"
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: float
+errors:
+ - "Value 'True' is not of type 'float'. Path: '/foo/0'"
+ - "Value 'abc' is not of type 'float'. Path: '/foo/1'"
diff --git a/tests/files/fail/test_type_int.yaml b/tests/files/fail/test_type_int.yaml
new file mode 100644
index 0000000..62d8fe1
--- /dev/null
+++ b/tests/files/fail/test_type_int.yaml
@@ -0,0 +1,45 @@
+---
+name: fail-type-int-1
+desc:
+data:
+ A101
+schema:
+ type: int
+errors:
+ - "Value 'A101' is not of type 'int'. Path: ''"
+---
+name: fail-type-int-2
+desc: Test wrong type as value in list
+data:
+ - "foo"
+schema:
+ type: seq
+ matching: "any"
+ seq:
+ - type: int
+errors:
+ - "Value 'foo' is not of type 'int'. Path: '/0'"
+---
+name: fail-type-int-3
+desc: Test that True/False are not valid integers
+data:
+ - 1
+ - True
+ - False
+schema:
+ type: seq
+ sequence:
+ - type: int
+errors:
+ - "Value 'False' is not of type 'int'. Path: '/2'"
+ - "Value 'True' is not of type 'int'. Path: '/1'"
+---
+name: fail-type-int-4
+desc: Test that a hexadecimal value fails the pattern
+data:
+ 0x12345678
+schema:
+ type: text
+ pattern: ^0x[0-9A-F]{1,8}$
+errors:
+ - "Value '305419896' does not match pattern '^0x[0-9A-F]{1,8}$'. Path: ''"
diff --git a/tests/files/fail/test_type_map.yaml b/tests/files/fail/test_type_map.yaml
new file mode 100644
index 0000000..a46086b
--- /dev/null
+++ b/tests/files/fail/test_type_map.yaml
@@ -0,0 +1,46 @@
+---
+name: fail-type-map-1
+desc: Test the most basic case for map
+data:
+ - 'foo'
+schema:
+ type: map
+ mapping:
+ foo:
+ type: str
+errors:
+ - "Value '['foo']' is not a dict. Value path: ''"
+---
+name: fail-type-map-2
+desc:
+data:
+ - - 'foo'
+ - - 'foo'
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ foo:
+ type: str
+errors:
+ - "Value '['foo']' is not a dict. Value path: '/0'"
+ - "Value '['foo']' is not a dict. Value path: '/1'"
+---
+name: fail-type-map-3
+desc: Test map type inside nested map & seq
+data:
+ foo:
+ - - 'foo'
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ bar:
+ type: str
+errors:
+ - "Value '['foo']' is not a dict. Value path: '/foo/0'"
diff --git a/tests/files/fail/test_type_none.yaml b/tests/files/fail/test_type_none.yaml
new file mode 100644
index 0000000..cc7c7df
--- /dev/null
+++ b/tests/files/fail/test_type_none.yaml
@@ -0,0 +1,54 @@
+#
+# NOTE: This case is not allowed because the Core class does NOT allow
+# validating when there is no data. This happens if None is at the top level
+# of the data structure.
+#
+# ---
+# name: type-none-1
+# desc: Most basic test for type None
+# data: ~
+# schema:
+# type: none
+---
+name: fail-type-none-2
+desc: Test that none type works as value in map
+data:
+ name: 'abc'
+schema:
+ type: map
+ mapping:
+ name:
+ type: none
+errors:
+ - "Value 'abc' is not of type 'none'. Path: '/name'"
+---
+name: fail-type-none-3
+desc: Test that none type works as value in sequence
+data:
+ - 'abc'
+ - 123
+schema:
+ type: seq
+ sequence:
+ - type: none
+errors:
+ - "Value '123' is not of type 'none'. Path: '/1'"
+ - "Value 'abc' is not of type 'none'. Path: '/0'"
+---
+name: fail-type-none-4
+desc: Test that none type works inside nested map, seq, map
+data:
+ foo:
+ - bar: 'abc'
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ bar:
+ type: none
+errors:
+ - "Value 'abc' is not of type 'none'. Path: '/foo/0/bar'"
diff --git a/tests/files/fail/test_type_number.yaml b/tests/files/fail/test_type_number.yaml
new file mode 100644
index 0000000..003b0a6
--- /dev/null
+++ b/tests/files/fail/test_type_number.yaml
@@ -0,0 +1,83 @@
+---
+name: fail-type-number-1
+desc: This tests the number validation rule with wrong data types
+data: True
+schema:
+ type: number
+errors:
+ - "Value 'True' is not of type 'number'. Path: ''"
+---
+name: fail-type-number-2
+desc: Test that number type works as value in map
+data:
+ foo: True
+ bar: 'abc'
+ qwe: []
+ rty: {}
+schema:
+ type: map
+ mapping:
+ foo:
+ type: number
+ bar:
+ type: number
+ qwe:
+ type: number
+ rty:
+ type: number
+errors:
+ - "Value 'True' is not of type 'number'. Path: '/foo'"
+ - "Value '[]' is not of type 'number'. Path: '/qwe'"
+ - "Value 'abc' is not of type 'number'. Path: '/bar'"
+ - "Value '{}' is not of type 'number'. Path: '/rty'"
+---
+name: fail-type-number-3
+desc: Test that different number values work as values in seq
+data:
+ - True
+ - 'abc'
+ - {}
+ - []
+schema:
+ type: seq
+ sequence:
+ - type: number
+errors:
+ - "Value 'True' is not of type 'number'. Path: '/0'"
+ - "Value '[]' is not of type 'number'. Path: '/3'"
+ - "Value 'abc' is not of type 'number'. Path: '/1'"
+ - "Value '{}' is not of type 'number'. Path: '/2'"
+---
+name: fail-type-number-4
+desc: Test that number type works inside nested map, seq, map
+data:
+ foobar:
+ - foo: True
+ bar: 'abc'
+ qwe: {}
+ rty: []
+schema:
+ type: map
+ mapping:
+ foobar:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ foo:
+ type: number
+ bar:
+ type: number
+ qwe:
+ type: number
+ rty:
+ type: number
+ ewq:
+ type: number
+ dsa:
+ type: number
+errors:
+ - "Value 'True' is not of type 'number'. Path: '/foobar/0/foo'"
+ - "Value '[]' is not of type 'number'. Path: '/foobar/0/rty'"
+ - "Value 'abc' is not of type 'number'. Path: '/foobar/0/bar'"
+ - "Value '{}' is not of type 'number'. Path: '/foobar/0/qwe'"
diff --git a/tests/files/fail/test_type_scalar.yaml b/tests/files/fail/test_type_scalar.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/fail/test_type_scalar.yaml
diff --git a/tests/files/fail/test_type_seq.yaml b/tests/files/fail/test_type_seq.yaml
new file mode 100644
index 0000000..a5baacd
--- /dev/null
+++ b/tests/files/fail/test_type_seq.yaml
@@ -0,0 +1,47 @@
+#
+# TODO: All of these failure tests currently raise a strange error that might not be the correct one and might require some redesign of the implementation.
+# pykwalify.errors.NotSequenceError: <NotSequenceError: error code 7: Value: {} is not of a sequence type: Path: '/'>
+#
+
+# ---
+# name: fail-type-seq-1
+# desc: Test the most basic case for seq
+# data:
+# {}
+# schema:
+# type: seq
+# sequence:
+# - type: str
+# errors:
+# - ''
+# ---
+# name: fail-type-seq-2
+# desc: Test that seq in seq works
+# data:
+# - {}
+# - {}
+# schema:
+# type: seq
+# sequence:
+# - type: seq
+# sequence:
+# - type: bool
+# errors:
+# - ''
+# ---
+# name: fail-type-seq-3
+# desc: Test bool inside nested map & seq
+# data:
+# - foo:
+# {}
+# schema:
+# type: seq
+# sequence:
+# - type: map
+# mapping:
+# foo:
+# type: seq
+# sequence:
+# - type: bool
+# errors:
+# - ''
diff --git a/tests/files/fail/test_type_str.yaml b/tests/files/fail/test_type_str.yaml
new file mode 100644
index 0000000..13cdbb6
--- /dev/null
+++ b/tests/files/fail/test_type_str.yaml
@@ -0,0 +1,73 @@
+---
+name: fail-type-str-1
+desc: Test simple str value
+data: 1
+schema:
+ type: str
+errors:
+ - "Value '1' is not of type 'str'. Path: ''"
+---
+name: fail-type-str-2
+desc: Test str value inside list
+data:
+ - 1
+ - True
+schema:
+ type: seq
+ sequence:
+ - type: str
+errors:
+ - "Value '1' is not of type 'str'. Path: '/0'"
+ - "Value 'True' is not of type 'str'. Path: '/1'"
+---
+name: fail-type-str-3
+desc: Test str value in mapping
+data:
+ foo: 1
+schema:
+ type: map
+ mapping:
+ foo:
+ type: str
+errors:
+ - "Value '1' is not of type 'str'. Path: '/foo'"
+---
+name: fail-type-str-4
+desc: Test str inside nested map & seq
+data:
+ foo:
+ - 1
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: str
+errors:
+ - "Value '1' is not of type 'str'. Path: '/foo/0'"
+---
+name: fail-deftype-1
+desc: default type test
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "name":
+ "email":
+data:
+ - name: 123
+ email: true
+ - name: 3.14
+ - email: 2004-01-01
+errors:
+ - "Value '123' is not of type 'str'. Path: '/0/name'"
+ - "Value '2004-01-01' is not of type 'str'. Path: '/2/email'"
+ - "Value '3.14' is not of type 'str'. Path: '/1/name'"
+ - "Value 'True' is not of type 'str'. Path: '/0/email'"
+ ## Kwalify errors
+ # :type_unmatch : 1:3:[/0/name] '123': not a string.
+ # :type_unmatch : 2:3:[/0/email] 'true': not a string.
+ # :type_unmatch : 3:3:[/1/name] '3.14': not a string.
+ # :type_unmatch : 4:3:[/2/email] '2004-01-01': not a string.
diff --git a/tests/files/fail/test_type_symbol.yaml b/tests/files/fail/test_type_symbol.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/fail/test_type_symbol.yaml
diff --git a/tests/files/fail/test_type_text.yaml b/tests/files/fail/test_type_text.yaml
new file mode 100644
index 0000000..f4e6d2e
--- /dev/null
+++ b/tests/files/fail/test_type_text.yaml
@@ -0,0 +1,70 @@
+---
+name: fail-type-text-1
+desc: Test simple text type
+data: True
+schema:
+ type: text
+errors:
+ - "Value 'True' is not of type 'text'. Path: ''"
+---
+name: fail-type-text-2
+desc: Test possible values as values in seq
+data:
+ - abc
+ - 123
+ - 3.14159
+ - True
+schema:
+ type: seq
+ sequence:
+ - type: text
+errors:
+ - "Value 'True' is not of type 'text'. Path: '/3'"
+---
+name: fail-type-text-3
+desc: Test possible values as values in map
+data:
+ foo: abc
+ bar: 123
+ qwe: 3.14159
+ rty: True
+schema:
+ type: map
+ mapping:
+ foo:
+ type: text
+ bar:
+ type: text
+ qwe:
+ type: text
+ rty:
+ type: text
+errors:
+ - "Value 'True' is not of type 'text'. Path: '/rty'"
+---
+name: fail-type-text-4
+desc: Test that text type works inside nested map, seq, map
+data:
+ foobar:
+ - foo: abc
+ bar: 123
+ qwe: 3.14159
+ rty: True
+schema:
+ type: map
+ mapping:
+ foobar:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ foo:
+ type: text
+ bar:
+ type: text
+ qwe:
+ type: text
+ rty:
+ type: text
+errors:
+ - "Value 'True' is not of type 'text'. Path: '/foobar/0/rty'"
diff --git a/tests/files/fail/test_type_timestamp.yaml b/tests/files/fail/test_type_timestamp.yaml
new file mode 100644
index 0000000..1a5e120
--- /dev/null
+++ b/tests/files/fail/test_type_timestamp.yaml
@@ -0,0 +1,15 @@
+---
+name: fail-type-timestamp-1
+desc: Test timestamps that should throw errors
+data:
+ d1: ""
+ d2: "1427650980"
+schema:
+ type: map
+ mapping:
+ d1:
+ type: timestamp
+ d2:
+ type: timestamp
+errors:
+ - "Timestamp value is empty. Path: '/d1'"
diff --git a/tests/files/fail/test_unique.yaml b/tests/files/fail/test_unique.yaml
new file mode 100644
index 0000000..0af5d7c
--- /dev/null
+++ b/tests/files/fail/test_unique.yaml
@@ -0,0 +1,109 @@
+---
+name: fail-unique-1
+desc: "NOTE: The reverse unique do not currently work proper # This will test the unique constraint but should fail"
+data:
+ - name: foo
+ email: admin@mail.com
+ groups:
+ - foo
+ - users
+ - admin
+ - foo
+ - name: bar
+ email: admin@mail.com
+ groups:
+ - admin
+ - users
+ - name: bar
+ email: baz@mail.com
+ groups:
+ - users
+schema:
+ type: seq
+ sequence:
+ - type: map
+ required: True
+ mapping:
+ name:
+ type: str
+ required: True
+ unique: True
+ email:
+ type: str
+ groups:
+ type: seq
+ sequence:
+ - type: str
+ unique: True
+errors:
+ - "Value 'bar' is not unique. Previous path: '/1/name'. Path: '/2/name'"
+ - "Value 'foo' is not unique. Previous path: '/0/groups/0'. Path: '/0/groups/3'"
+---
+name: fail-unique-2
+desc: unique constraint test with map
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "name":
+ unique: true
+ "age":
+ type: int
+data:
+ - name: foo
+ age: 10
+ - name: bar
+ age: 10
+ - age: 10
+ name: bar
+errors:
+ - "Value 'bar' is not unique. Previous path: '/1/name'. Path: '/2/name'"
+ ## Kwalify errors
+ # :value_notunique : 6:3:[/2/name] 'bar': is already used at '/1/name'.
+---
+name: fail-unique-3
+desc: unique constraint test with seq
+schema:
+ type: seq
+ sequence:
+ - type: str
+ unique: true
+data:
+ - foo
+ - ~
+ - bar
+ - ~
+ - bar
+errors:
+ - "Value 'bar' is not unique. Previous path: '/2'. Path: '/4'"
+ ## Kwalify errors
+ # :value_notunique : 5:1:[/4] 'bar': is already used at '/2'.
+---
+name: fail-unique-4
+desc: unique constraint and '<<' (merge)
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "name":
+ type: str
+ required: true
+ unique: true
+ "value":
+ type: any
+ required: true
+data:
+ - &a1
+ name: x1
+ value: 10
+ - <<: *a1
+ - <<: *a1 # wrong validation error
+ name: x3
+errors:
+ - "Value 'x1' is not unique. Previous path: '/0/name'. Path: '/1/name'"
+ # TODO: Possibly missing one error here...
+ ## Kwalify errors
+ # :value_notunique : 4:3:[/1/name] 'x1': is already used at '/0/name'.
+ # :value_notunique : 5:3:[/2/name] 'x1': is already used at '/0/name'.
diff --git a/tests/files/fail/test_version.yaml b/tests/files/fail/test_version.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/fail/test_version.yaml
diff --git a/tests/files/partial_schemas/1f-data.yaml b/tests/files/partial_schemas/1f-data.yaml
new file mode 100644
index 0000000..4444bad
--- /dev/null
+++ b/tests/files/partial_schemas/1f-data.yaml
@@ -0,0 +1 @@
+- foo: "opa"
\ No newline at end of file
diff --git a/tests/files/partial_schemas/1f-partials.yaml b/tests/files/partial_schemas/1f-partials.yaml
new file mode 100644
index 0000000..b93127a
--- /dev/null
+++ b/tests/files/partial_schemas/1f-partials.yaml
@@ -0,0 +1,11 @@
+schema;fooone:
+ type: map
+ mapping:
+ foo:
+ include: footwo
+
+schema;footwo:
+ type: map
+ mapping:
+ foo:
+ type: bool
diff --git a/tests/files/partial_schemas/1f-schema.yaml b/tests/files/partial_schemas/1f-schema.yaml
new file mode 100644
index 0000000..462d391
--- /dev/null
+++ b/tests/files/partial_schemas/1f-schema.yaml
@@ -0,0 +1,3 @@
+type: seq
+sequence:
+ - include: fooonez
diff --git a/tests/files/partial_schemas/1s-data.yaml b/tests/files/partial_schemas/1s-data.yaml
new file mode 100644
index 0000000..4444bad
--- /dev/null
+++ b/tests/files/partial_schemas/1s-data.yaml
@@ -0,0 +1 @@
+- foo: "opa"
\ No newline at end of file
diff --git a/tests/files/partial_schemas/1s-partials.yaml b/tests/files/partial_schemas/1s-partials.yaml
new file mode 100644
index 0000000..ef84657
--- /dev/null
+++ b/tests/files/partial_schemas/1s-partials.yaml
@@ -0,0 +1,11 @@
+schema;fooone:
+ type: map
+ mapping:
+ foo:
+ type: str
+
+schema;footwo:
+ type: map
+ mapping:
+ foo:
+ type: bool
\ No newline at end of file
diff --git a/tests/files/partial_schemas/1s-schema.yaml b/tests/files/partial_schemas/1s-schema.yaml
new file mode 100644
index 0000000..ea433b7
--- /dev/null
+++ b/tests/files/partial_schemas/1s-schema.yaml
@@ -0,0 +1,3 @@
+type: seq
+sequence:
+ - include: fooone
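The `1s` fixture set above shows the partial-schema mechanism end to end: the main schema only says `include: fooone`, and the named rule lives in the partials file under a `schema;fooone` key. A sketch of how the two files would be combined, assuming `Core` merges every file passed in `schema_files` before resolving includes:

    # Partial-schema usage sketch; the partials file supplies 'schema;fooone'.
    from pykwalify.core import Core

    c = Core(
        source_file="tests/files/partial_schemas/1s-data.yaml",
        schema_files=[
            "tests/files/partial_schemas/1s-schema.yaml",
            "tests/files/partial_schemas/1s-partials.yaml",
        ],
    )
    c.validate()  # 'include: fooone' resolves against the partials file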
diff --git a/tests/files/partial_schemas/2f-data.yaml b/tests/files/partial_schemas/2f-data.yaml
new file mode 100644
index 0000000..b1d5359
--- /dev/null
+++ b/tests/files/partial_schemas/2f-data.yaml
@@ -0,0 +1 @@
+- True
diff --git a/tests/files/partial_schemas/2f-schema.yaml b/tests/files/partial_schemas/2f-schema.yaml
new file mode 100644
index 0000000..d5bd186
--- /dev/null
+++ b/tests/files/partial_schemas/2f-schema.yaml
@@ -0,0 +1,5 @@
+type: seq
+sequence:
+ - include: foo
+schema;foo:
+ type: str
diff --git a/tests/files/partial_schemas/2s-data.yaml b/tests/files/partial_schemas/2s-data.yaml
new file mode 100644
index 0000000..c67a590
--- /dev/null
+++ b/tests/files/partial_schemas/2s-data.yaml
@@ -0,0 +1,3 @@
+- foo:
+ bar:
+ - true
diff --git a/tests/files/partial_schemas/2s-partials.yaml b/tests/files/partial_schemas/2s-partials.yaml
new file mode 100644
index 0000000..238abb9
--- /dev/null
+++ b/tests/files/partial_schemas/2s-partials.yaml
@@ -0,0 +1,16 @@
+schema;footwo:
+ type: map
+ mapping:
+ bar:
+ include: foothree
+
+schema;fooone:
+ type: map
+ mapping:
+ foo:
+ include: footwo
+
+schema;foothree:
+ type: seq
+ sequence:
+ - type: bool
diff --git a/tests/files/partial_schemas/2s-schema.yaml b/tests/files/partial_schemas/2s-schema.yaml
new file mode 100644
index 0000000..ea433b7
--- /dev/null
+++ b/tests/files/partial_schemas/2s-schema.yaml
@@ -0,0 +1,3 @@
+type: seq
+sequence:
+ - include: fooone
diff --git a/tests/files/partial_schemas/3f-data.yaml b/tests/files/partial_schemas/3f-data.yaml
new file mode 100644
index 0000000..0ca9514
--- /dev/null
+++ b/tests/files/partial_schemas/3f-data.yaml
@@ -0,0 +1 @@
+True
diff --git a/tests/files/partial_schemas/3f-schema.yaml b/tests/files/partial_schemas/3f-schema.yaml
new file mode 100644
index 0000000..d83c47c
--- /dev/null
+++ b/tests/files/partial_schemas/3f-schema.yaml
@@ -0,0 +1,3 @@
+include: foo
+schema;foo:
+ type: str
diff --git a/tests/files/partial_schemas/4f-data.yaml b/tests/files/partial_schemas/4f-data.yaml
new file mode 100644
index 0000000..54c108e
--- /dev/null
+++ b/tests/files/partial_schemas/4f-data.yaml
@@ -0,0 +1,2 @@
+- foo:
+ - bar: True
diff --git a/tests/files/partial_schemas/4f-schema.yaml b/tests/files/partial_schemas/4f-schema.yaml
new file mode 100644
index 0000000..d109e40
--- /dev/null
+++ b/tests/files/partial_schemas/4f-schema.yaml
@@ -0,0 +1,20 @@
+type: seq
+sequence:
+ - include: fooone
+
+schema;fooone:
+ type: map
+ mapping:
+ foo:
+ include: footwo
+
+schema;footwo:
+ type: seq
+ sequence:
+ - include: foothree
+
+schema;foothree:
+ type: map
+ mapping:
+ bar:
+ type: str
diff --git a/tests/files/partial_schemas/5f-data.yaml b/tests/files/partial_schemas/5f-data.yaml
new file mode 100644
index 0000000..78cc18b
--- /dev/null
+++ b/tests/files/partial_schemas/5f-data.yaml
@@ -0,0 +1 @@
+- - - - True
diff --git a/tests/files/partial_schemas/5f-schema.yaml b/tests/files/partial_schemas/5f-schema.yaml
new file mode 100644
index 0000000..788d906
--- /dev/null
+++ b/tests/files/partial_schemas/5f-schema.yaml
@@ -0,0 +1,18 @@
+type: seq
+sequence:
+ - include: fooone
+
+schema;fooone:
+ type: seq
+ sequence:
+ - include: footwo
+
+schema;footwo:
+ type: seq
+ sequence:
+ - include: foothree
+
+schema;foothree:
+ type: seq
+ sequence:
+ - type: str
diff --git a/tests/files/partial_schemas/6f-data.yaml b/tests/files/partial_schemas/6f-data.yaml
new file mode 100644
index 0000000..49b7ed7
--- /dev/null
+++ b/tests/files/partial_schemas/6f-data.yaml
@@ -0,0 +1,4 @@
+foo:
+ bar:
+ qwe:
+ ewq: True
diff --git a/tests/files/partial_schemas/6f-schema.yaml b/tests/files/partial_schemas/6f-schema.yaml
new file mode 100644
index 0000000..00c0a91
--- /dev/null
+++ b/tests/files/partial_schemas/6f-schema.yaml
@@ -0,0 +1,22 @@
+type: map
+mapping:
+ foo:
+ include: fooone
+
+schema;fooone:
+ type: map
+ mapping:
+ bar:
+ include: footwo
+
+schema;footwo:
+ type: map
+ mapping:
+ qwe:
+ include: foothree
+
+schema;foothree:
+ type: map
+ mapping:
+ ewq:
+ type: str
diff --git a/tests/files/partial_schemas/7s-data.yaml b/tests/files/partial_schemas/7s-data.yaml
new file mode 100644
index 0000000..d48b5a2
--- /dev/null
+++ b/tests/files/partial_schemas/7s-data.yaml
@@ -0,0 +1,5 @@
+foo: blah
+bar:
+ - blah
+ - blah
+ - blah
diff --git a/tests/files/partial_schemas/7s-schema.yaml b/tests/files/partial_schemas/7s-schema.yaml
new file mode 100644
index 0000000..eaf0613
--- /dev/null
+++ b/tests/files/partial_schemas/7s-schema.yaml
@@ -0,0 +1,12 @@
+type: map
+mapping:
+ foo:
+ type: str
+ required: True
+ bar:
+ include: bar
+schema;bar:
+ type: seq
+ required: True
+ sequence:
+ - type: str
diff --git a/tests/files/success/test_anchor.yaml b/tests/files/success/test_anchor.yaml
new file mode 100644
index 0000000..fa53412
--- /dev/null
+++ b/tests/files/success/test_anchor.yaml
@@ -0,0 +1,92 @@
+##
+---
+name: anchor1
+desc: schema with anchor
+#
+schema:
+ type: seq
+ required: true
+ sequence:
+ - type: map
+ required: true
+ mapping:
+ first-name: &name
+ type: str
+ required: True
+ family-name: *name
+#
+data:
+ - first-name: foo
+ family-name: Foo
+ - first-name: bar
+ family-name: Bar
+##
+---
+name: anchor2
+desc: schema with anchor 2
+#
+schema:
+ type: map
+ required: true
+ mapping:
+ title: &name
+ type: str
+ required: true
+ address-book:
+ type: seq
+ required: true
+ sequence:
+ - type: map
+ mapping:
+ name: *name
+ email:
+ type: str
+ required: True
+#
+data:
+ title: my friends
+ address-book:
+ - name: foo
+ email: foo@mail.com
+ - name: bar
+ email: bar@mail.com
+#
+# TODO: THIS TEST IS BROKEN BECAUSE IT CAUSES INFINITE RECURSION IN PYTHON
+#
+# ##
+# ---
+# name: anchor3
+# desc: document with anchor
+# #
+# schema:
+# type: seq
+# sequence:
+# - &employee
+# type: map
+# mapping:
+# name:
+# type: str
+# post:
+# type: str
+# enum:
+# - exective
+# - manager
+# - clerk
+# supervisor: *employee
+# #
+# data:
+# - &foo
+# name: foo
+# post: exective
+# - &bar
+# name: bar
+# post: manager
+# supervisor: *foo
+# - &baz
+# name: baz
+# post: clerk
+# supervisor: *bar
+# - &zak
+# name: zak
+# post: clerk
+# supervisor: *bar
diff --git a/tests/files/success/test_assert.yaml b/tests/files/success/test_assert.yaml
new file mode 100644
index 0000000..46b3ef3
--- /dev/null
+++ b/tests/files/success/test_assert.yaml
@@ -0,0 +1,28 @@
+##
+---
+name: assert1
+desc: assert test
+#
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "less-than":
+ type: number
+ assert: val < 8
+ "more-than":
+ type: number
+ assert: 3 < val
+ "between":
+ type: number
+ assert: 3 < val and val < 8
+ "except":
+ type: number
+ assert: val < 3 or 8 < val
+#
+data:
+ - less-than: 5
+ - more-than: 5
+ - between: 5
+ - except: 0
diff --git a/tests/files/success/test_default.yaml b/tests/files/success/test_default.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/success/test_default.yaml
diff --git a/tests/files/success/test_desc.yaml b/tests/files/success/test_desc.yaml
new file mode 100644
index 0000000..347a729
--- /dev/null
+++ b/tests/files/success/test_desc.yaml
@@ -0,0 +1,7 @@
+---
+name: desc-1
+desc: Test basic desc
+data: 'foobar'
+schema:
+ desc: This is a description...
+ type: str
diff --git a/tests/files/success/test_enum.yaml b/tests/files/success/test_enum.yaml
new file mode 100644
index 0000000..c583d42
--- /dev/null
+++ b/tests/files/success/test_enum.yaml
@@ -0,0 +1,12 @@
+---
+name: enum-1
+desc: Test simple enum
+data:
+ - A
+ - B
+ - O
+schema:
+ type: seq
+ sequence:
+ - type: str
+ enum: [A, B, O, AB]
diff --git a/tests/files/success/test_example.yaml b/tests/files/success/test_example.yaml
new file mode 100644
index 0000000..ee6965f
--- /dev/null
+++ b/tests/files/success/test_example.yaml
@@ -0,0 +1,5 @@
+---
+data: foo
+schema:
+ example: Foobar
+ type: str
diff --git a/tests/files/success/test_extensions.yaml b/tests/files/success/test_extensions.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/success/test_extensions.yaml
diff --git a/tests/files/success/test_func.yaml b/tests/files/success/test_func.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/success/test_func.yaml
diff --git a/tests/files/success/test_ident.yaml b/tests/files/success/test_ident.yaml
new file mode 100644
index 0000000..0f3c95c
--- /dev/null
+++ b/tests/files/success/test_ident.yaml
@@ -0,0 +1,22 @@
+##
+---
+name: ident1
+desc: ident constraint test
+#
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "name":
+ ident: true
+ "age":
+ type: int
+#
+data:
+ - name: foo
+ age: 10
+ - name: bar
+ age: 10
+ - name: baz
+ age: 10
diff --git a/tests/files/success/test_include.yaml b/tests/files/success/test_include.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/success/test_include.yaml
diff --git a/tests/files/success/test_length.yaml b/tests/files/success/test_length.yaml
new file mode 100644
index 0000000..1b8d7d8
--- /dev/null
+++ b/tests/files/success/test_length.yaml
@@ -0,0 +1,98 @@
+---
+name: length1
+desc: length test
+schema:
+ type: map
+ mapping:
+ "max-only":
+ type: seq
+ sequence:
+ - type: str
+ length:
+ max: 8
+ "min-only":
+ type: seq
+ sequence:
+ - type: str
+ length:
+ min: 4
+ "max-and-min":
+ type: seq
+ sequence:
+ - type: str
+ length:
+ max: 8
+ min: 4
+data:
+ max-only:
+ - hogehoge
+ - a
+ -
+ min-only:
+ - hoge
+ - hogehogehogehogehoge
+ max-and-min:
+ - hogehoge
+ - hoge
+---
+name: length2
+desc: length test (with max-ex and min-ex)
+schema:
+ type: map
+ mapping:
+ "max-ex-only":
+ type: seq
+ sequence:
+ - type: str
+ length:
+ max-ex: 8
+ "min-ex-only":
+ type: seq
+ sequence:
+ - type: str
+ length:
+ min-ex: 4
+ "max-ex-and-min-ex":
+ type: seq
+ sequence:
+ - type: str
+ length:
+ max-ex: 8
+ min-ex: 4
+data:
+ max-ex-only:
+ - hogehog
+ - a
+ -
+ min-ex-only:
+ - hoge!
+ max-ex-and-min-ex:
+ - hogehog
+ - hoge!
+---
+name: length3
+desc: length test (with min, max, max-ex and min-ex)
+schema:
+ type: map
+ mapping:
+ "A":
+ type: seq
+ sequence:
+ - type: str
+ length:
+ max: 8
+ min-ex: 4
+ "B":
+ type: seq
+ sequence:
+ - type: str
+ length:
+ max-ex: 8
+ min: 4
+data:
+ A:
+ - hogehoge
+ - hogeh
+ B:
+ - hogehog
+ - hoge
diff --git a/tests/files/success/test_mapping.yaml b/tests/files/success/test_mapping.yaml
new file mode 100644
index 0000000..fb82fd6
--- /dev/null
+++ b/tests/files/success/test_mapping.yaml
@@ -0,0 +1,308 @@
+---
+name: mapping1
+desc: Most basic mapping validation
+data:
+ foo: bar
+schema:
+ type: map
+ mapping:
+ foo:
+ type: str
+---
+name: mapping2
+desc: Complex mapping that tests several subtypes for each key
+#
+schema:
+ type: map
+ required: true
+ mapping:
+ name:
+ type: str
+ required: true
+ email:
+ type: str
+ # This pattern value was modified from /@/ to .+@.+ to make it compatible with python
+ pattern: .+@.+
+ required: True
+ age:
+ type: int
+ blood:
+ type: str
+ enum:
+ - A
+ - B
+ - O
+ - AB
+ birth:
+ type: date
+data:
+ name: foo
+ email: foo@mail.com
+ age: 20
+ blood: AB
+ birth: 1985-01-01
+---
+name: mapping3
+desc: Test that mapping works inside a sequence
+data:
+ - foo: True
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ foo:
+ type: bool
+---
+name: mapping4
+desc: Test that map inside seq inside map works
+data:
+ company: Kuwata lab.
+ email: webmaster@kuwata-lab.com
+ employees:
+ - code: 101
+ name: foo
+ email: foo@kuwata-lab.com
+ - code: 102
+ name: bar
+ email: bar@kuwata-lab.com
+schema:
+ type: map
+ mapping:
+ company:
+ type: str
+ required: True
+ email:
+ type: str
+ employees:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ code:
+ type: int
+ required: True
+ name:
+ type: str
+ required: True
+ email:
+ type: str
+---
+name: mapping5
+desc: Test the allowempty option on a mapping
+data:
+ datasources:
+ test1: test1.py
+ test2: test2.py
+schema:
+ type: map
+ mapping:
+ datasources:
+ type: map
+ allowempty: True
+ mapping:
+ test1:
+ type: str
+---
+name: mapping6
+desc: Test that regex keys work
+data:
+ mic:
+ - input
+ - foo
+ mock:
+ - True
+ - False
+schema:
+ type: map
+ matching-rule: "any"
+ mapping:
+ re;(mi.+):
+ type: seq
+ sequence:
+ - type: str
+ regex;(mo.+):
+ type: seq
+ sequence:
+ - type: bool
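+# Note: both the 're;(...)' and 'regex;(...)' key prefixes used above mark
+# regex-matched keys; this document intentionally exercises both spellings.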
+---
+name: mapping7
+desc: Test that mapping name works
+data:
+ datasources: test1.py
+schema:
+ type: map
+ mapping:
+ datasources:
+ type: str
+---
+name: mapping8
+desc: Test that map shortcut works
+data:
+ datasources: test1.py
+schema:
+ type: map
+ map:
+ datasources:
+ type: str
+---
+name: mapping9
+desc: Test that you do not have to specify type map
+data:
+ streams: foobar
+schema:
+ mapping:
+ streams:
+ type: str
+---
+name: mapping10
+desc: Test that you do not have to specify type map when map exists in schema
+data:
+ streams: foobar
+schema:
+ map:
+ streams:
+ type: str
+---
+name: mapping11
+desc: Test keyword regex default matching-rule any
+data:
+ foobar1: 1
+ foobar2: 2
+ bar2: 3
+schema:
+ type: map
+ mapping:
+ regex;([1-2]$):
+ type: int
+ regex;(^foobar):
+ type: int
+---
+name: mapping12
+desc: Test keyword regex declared matching-rule any
+data:
+ foobar1: 1
+ foobar2: 2
+ bar2: 3
+schema:
+ type: map
+ matching-rule: 'any'
+ mapping:
+ regex;([1-2]$):
+ type: int
+ regex;(^foobar):
+ type: int
+---
+name: mapping13
+desc: Test keyword regex declared matching-rule all
+data:
+ foobar1: 1
+ foobar2: 2
+ foobar3: 3
+schema:
+ type: map
+ matching-rule: 'all'
+ mapping:
+ regex;([1-3]$):
+ type: int
+ regex;(^foobar):
+ type: int
+---
+name: mapping14
+desc: Test mixed keyword regex and normal keyword
+data:
+ standard:
+ FRIST-800-53
+ AU-1:
+ family: AU
+ name: Audit and Accountability Policy and Procedures
+schema:
+ type: map
+ mapping:
+ regex;/[A-Z]-/:
+ type: map
+ mapping:
+ name:
+ type: str
+ family:
+ type: str
+ required: True
+ standard:
+ type: str
+---
+name: mapping-default-1
+desc: Test that default mapping keyword works out of the box in a good case
+data:
+ OWNERSHIP:
+ - code: 1
+ key: BLM-BURNS
+ alias: BLM-BURNS
+ WHT: foo
+schema:
+ type: map
+ mapping:
+ WHT:
+ type: str
+ =:
+ type: seq
+ required: true
+ sequence:
+ - type: map
+ mapping:
+ 'code':
+ type: int
+ required: true
+ unique: true
+ 'key':
+ type: str
+ required: true
+ 'alias':
+ type: str
+ required: true
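+# The '=' key above is the default rule: it applies to any key that is not
+# explicitly listed in the mapping (here, everything except 'WHT').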
+---
+name: mapping-default-2
+desc: Test the default map rule ('=') with number type when no other key matches
+#
+schema:
+ type: map
+ mapping:
+ =:
+ type: number
+ range:
+ min: -10
+ max: 10
+#
+data:
+ value1: 0
+ value2: 10
+ value3: -10
+---
+name: mapping17
+desc: Test that allowempty works without specifying mapping keyword when used inside a sequence block
+data:
+ rally:
+ plugins:
+ - netcreate-boot: rally/rally-plugins/netcreate-boot
+schema:
+ type: map
+ mapping:
+ rally:
+ type: map
+ allowempty: True
+ mapping:
+ plugins:
+ type: seq
+ sequence:
+ - type: map
+ allowempty: True
+---
+name: mapping18
+desc: Test that regexes can be 'required'
+data:
+ person1: Jack
+ person2: Fred
+schema:
+ type: map
+ mapping:
+ regex;(person[1-9]):
+ required: True \ No newline at end of file
diff --git a/tests/files/success/test_matching.yaml b/tests/files/success/test_matching.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/success/test_matching.yaml
diff --git a/tests/files/success/test_merge.yaml b/tests/files/success/test_merge.yaml
new file mode 100644
index 0000000..1e0a94d
--- /dev/null
+++ b/tests/files/success/test_merge.yaml
@@ -0,0 +1,36 @@
+##
+---
+name: merge1
+desc: merge maps
+#
+schema:
+ type: map
+ mapping:
+ "group":
+ type: map
+ mapping:
+ "name": &name
+ type: str
+ required: True
+ "email": &email
+ type: str
+ pattern: .+@.+
+ required: False
+ "user":
+ type: map
+ mapping:
+ "name":
+ <<: *name # merge
+ length:
+ max: 16 # add
+ "email":
+ <<: *email # merge
+ required: True # override
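+# The '<<' merge keys above are resolved by the YAML loader itself, so the
+# schema that reaches the validator already contains the merged and
+# overridden rules (this is standard YAML anchor/merge behaviour).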
+#
+data:
+ group:
+ name: foo
+ email: foo@mail.com
+ user:
+ name: bar
+ email: bar@mail.com
diff --git a/tests/files/success/test_name.yaml b/tests/files/success/test_name.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/success/test_name.yaml
diff --git a/tests/files/success/test_nullable.yaml b/tests/files/success/test_nullable.yaml
new file mode 100644
index 0000000..df8ddf8
--- /dev/null
+++ b/tests/files/success/test_nullable.yaml
@@ -0,0 +1,11 @@
+---
+name: nullable1
+desc: Test that nullable works
+data:
+ name:
+schema:
+ type: map
+ mapping:
+ name:
+ type: str
+ nullable: True
diff --git a/tests/files/success/test_pattern.yaml b/tests/files/success/test_pattern.yaml
new file mode 100644
index 0000000..41a751b
--- /dev/null
+++ b/tests/files/success/test_pattern.yaml
@@ -0,0 +1,18 @@
+---
+name: pattern1
+desc: Test simplest pattern
+data: foo@gmail.com
+schema:
+ type: str
+ pattern: .+@.+
+---
+name: pattern2
+desc: Test simple pattern in list
+data:
+ - foo@mail.com
+ - bar@mail.net
+schema:
+ type: seq
+ sequence:
+ - type: str
+ pattern: .+@.+
diff --git a/tests/files/success/test_range.yaml b/tests/files/success/test_range.yaml
new file mode 100644
index 0000000..ef3c18a
--- /dev/null
+++ b/tests/files/success/test_range.yaml
@@ -0,0 +1,166 @@
+##
+---
+name: range1
+desc: range test && bug#?????
+#
+schema:
+ type: map
+ mapping:
+ "max-only":
+ type: seq
+ sequence:
+ - type: number
+ required: True
+ range: {max: 100}
+ "min-only":
+ type: seq
+ sequence:
+ - type: number
+ required: True
+ range: {min: 10.0}
+ "max-and-min":
+ type: seq
+ sequence:
+ - type: number
+ required: True
+ range: {max: 100.0, min: 10.0}
+#
+data:
+ max-only:
+ - 100
+ - 100.0
+ min-only:
+ - 10
+ - 10.0
+ max-and-min:
+ - 100
+ - 10
+ - 100.0
+ - 10.0
+##
+---
+name: range2
+desc: range test (with max-ex and min-ex)
+#
+schema:
+ type: map
+ mapping:
+ "max-ex-only":
+ type: seq
+ sequence:
+ - type: number
+ required: True
+ range: {max-ex: 100}
+ "min-ex-only":
+ type: seq
+ sequence:
+ - type: number
+ required: True
+ range: {min-ex: 10.0}
+ "max-ex-and-min-ex":
+ type: seq
+ sequence:
+ - type: number
+ required: True
+ range: {max-ex: 100.0, min-ex: 10.0}
+#
+data:
+ max-ex-only:
+ - 99
+ - 99.99999
+ min-ex-only:
+ - 11
+ - 10.00001
+ max-ex-and-min-ex:
+ - 99
+ - 11
+ - 99.99999
+ - 10.00001
+##
+---
+name: range3
+desc: range test (with max, min, max-ex and min-ex)
+#
+schema:
+ type: map
+ mapping:
+ "A":
+ type: seq
+ sequence:
+ - type: number
+ required: True
+ range: {max: 100, min-ex: 10.0}
+ "B":
+ type: seq
+ sequence:
+ - type: number
+ required: True
+ range: {min: 10, max-ex: 100.0}
+#
+data:
+ A:
+ - 100
+ - 10.00001
+ B:
+ - 10
+ - 99.99999
+---
+name: range4
+desc: Test range min/max works with map size
+data:
+ foo: bar
+schema:
+ type: map
+ range:
+ min: 1
+ max: 3
+ mapping:
+ foo:
+ type: str
+---
+name: range5
+desc: Test range works with seq
+data:
+ - foobar
+ - barfoo
+schema:
+ type: seq
+ range:
+ min: 1
+ max: 3
+ sequence:
+ - type: str
+---
+name: range6
+desc: test range on float type
+data:
+ 2.0
+schema:
+ type: float
+ range:
+ min: 1
+ max: 3
+---
+name: range7
+desc: Test range on float with negative boundary
+data:
+ -0.9
+schema:
+ type: float
+ range:
+ min: -1
+ max: 1.0
+---
+name: range8
+desc: Test range min-ex & max-ex
+data:
+ - 20
+ - 25
+ - 29
+schema:
+ type: seq
+ sequence:
+ - type: int
+ range:
+ max-ex: 30
+ min-ex: 18
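+# min-ex and max-ex are exclusive bounds, so the values 20, 25 and 29 above
+# pass while 18 or 30 would be rejected.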
diff --git a/tests/files/success/test_required.yaml b/tests/files/success/test_required.yaml
new file mode 100644
index 0000000..9787843
--- /dev/null
+++ b/tests/files/success/test_required.yaml
@@ -0,0 +1,15 @@
+---
+name: required1
+desc: Test that req and required work
+data:
+ name: foo
+ foo: bar
+schema:
+ type: map
+ mapping:
+ name:
+ type: str
+ req: True
+ foo:
+ type: str
+ required: True
diff --git a/tests/files/success/test_schema.yaml b/tests/files/success/test_schema.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/success/test_schema.yaml
diff --git a/tests/files/success/test_sequence.yaml b/tests/files/success/test_sequence.yaml
new file mode 100644
index 0000000..3a4491f
--- /dev/null
+++ b/tests/files/success/test_sequence.yaml
@@ -0,0 +1,44 @@
+##
+---
+name: sequence1
+desc: sequence test
+#
+schema:
+ type: seq
+ sequence:
+ - type: str
+data:
+ - foo
+ - bar
+ - baz
+---
+name: sequence2
+desc: Test sequence shortcut
+schema:
+ type: seq
+ seq:
+ - type: str
+data:
+ - foo
+ - bar
+ - baz
+---
+name: sequence3
+desc: Test that you do not have to specify type seq when keyword sequence is present
+data:
+ - foo
+ - bar
+ - foobar
+schema:
+ sequence:
+ - type: str
+---
+name: sequence4
+desc: Test that you do not have to specify type seq when keyword seq is present
+data:
+ - foo
+ - bar
+ - foobar
+schema:
+ seq:
+ - type: str
diff --git a/tests/files/success/test_sequence_multi.yaml b/tests/files/success/test_sequence_multi.yaml
new file mode 100644
index 0000000..432145c
--- /dev/null
+++ b/tests/files/success/test_sequence_multi.yaml
@@ -0,0 +1,64 @@
+---
+name: seq-multi-1
+desc: Test that multiple sequence values are supported
+data:
+ - "foo"
+ - 123
+schema:
+ type: seq
+ matching: "any"
+ seq:
+ - type: str
+ - type: int
+---
+name: seq-multi-2
+desc: Test that multiple sequence values with matching 'all' are supported
+data:
+ - "foo"
+ - "123"
+schema:
+ type: seq
+ matching: "all"
+ seq:
+ - type: str
+ - type: str
+---
+name: seq-multi-3
+desc: Test that multiple sequence values with matching '*' are supported
+data:
+ - "foo"
+schema:
+ type: seq
+ matching: "*"
+ seq:
+ - type: bool
+ - type: int
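+# The 'matching' keyword controls how sequence items are checked against the
+# listed rules: 'any' (the default) requires each item to match at least one
+# rule, 'all' requires each item to match every rule, and '*' is the most
+# permissive mode (here a str value passes although only bool and int rules
+# are listed).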
+---
+name: seq-multi-4
+desc: Test that multiple sequence values with nested data structures work
+data:
+ - foo: 123
+ - "foobar"
+schema:
+ type: seq
+ matching: "any"
+ seq:
+ - type: str
+ - type: map
+ mapping:
+ foo:
+ type: int
+---
+name: seq-multi-5
+desc: Test that multiple sequence values with nested lists work
+data:
+ - - 123
+ - "foobar"
+schema:
+ type: seq
+ matching: "any"
+ seq:
+ - type: str
+ - type: seq
+ sequence:
+ - type: int
diff --git a/tests/files/success/test_type_any.yaml b/tests/files/success/test_type_any.yaml
new file mode 100644
index 0000000..cc015e4
--- /dev/null
+++ b/tests/files/success/test_type_any.yaml
@@ -0,0 +1,27 @@
+---
+name: type-any-1
+desc: test simplest case of any type
+data: abc
+schema:
+ type: any
+---
+name: type-any-2
+desc: test any type inside sequence
+data:
+ - abc
+ - 123
+ - 3.14159
+ - True
+schema:
+ type: seq
+ sequence:
+ - type: any
+---
+name: type-any-3
+desc: test any type validates a dict
+data:
+ foobar:
+ barfoo:
+ opa: 1337
+schema:
+ type: any
diff --git a/tests/files/success/test_type_bool.yaml b/tests/files/success/test_type_bool.yaml
new file mode 100644
index 0000000..4857690
--- /dev/null
+++ b/tests/files/success/test_type_bool.yaml
@@ -0,0 +1,39 @@
+---
+name: bool1
+desc: Test simplest bool value
+data: True
+schema:
+ type: bool
+---
+name: bool2
+desc: Test bool value inside list
+data:
+ - True
+ - False
+schema:
+ type: seq
+ sequence:
+ - type: bool
+---
+name: bool3
+desc: Test bool value in mapping
+data:
+ foo: True
+schema:
+ type: map
+ mapping:
+ foo:
+ type: bool
+---
+name: bool4
+desc: Test bool inside nested map & seq
+data:
+ foo:
+ - True
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: bool
diff --git a/tests/files/success/test_type_date.yaml b/tests/files/success/test_type_date.yaml
new file mode 100644
index 0000000..b6166e6
--- /dev/null
+++ b/tests/files/success/test_type_date.yaml
@@ -0,0 +1,40 @@
+---
+name: type-date-1
+desc: Basic test for date type with default formats
+data: "2017-01-01"
+schema:
+ type: date
+---
+name: type-date-2
+desc: Basic test for date type with defined date-formats
+data: "31-01-2017"
+schema:
+ type: date
+ format: "%d-%m-%Y"
+---
+name: type-date-3
+desc: Basic test for date type with defined date-formats
+data:
+ - "31-01-2017"
+ - "2017-01-31"
+schema:
+ type: seq
+ sequence:
+ - type: date
+ format:
+ - "%d-%m-%Y"
+ - "%Y-%m-%d"
+---
+name: type-date-4
+desc: Basic test for many different possible values with default formats
+data:
+ - '1997'
+ - '1997-07'
+ - '1997-07-16'
+ - '1997-07-16T19:20+01:00'
+ - '1997-07-16T19:20:30+01:00'
+ - '1997-07-16T19:20:30.45+01:00'
+schema:
+ type: seq
+ sequence:
+ - type: date
diff --git a/tests/files/success/test_type_enum.yaml b/tests/files/success/test_type_enum.yaml
new file mode 100644
index 0000000..bde9bd7
--- /dev/null
+++ b/tests/files/success/test_type_enum.yaml
@@ -0,0 +1,43 @@
+---
+name: type-enum-1
+desc: Test the most basic case for enum
+data: C
+schema:
+ type: str
+ enum: [A, B, C, D, E]
+---
+name: type-enum-2
+desc: Test enum value inside list
+data:
+ - B
+ - C
+schema:
+ type: seq
+ sequence:
+ - type: str
+ enum: [A, B, C, D, E]
+---
+name: type-enum-3
+desc: Test enum value in mapping
+data:
+ foo: A
+schema:
+ type: map
+ mapping:
+ foo:
+ type: str
+ enum: [A, B, C, D, E]
+---
+name: type-enum-4
+desc: Test enum inside nested map & seq
+data:
+ foo:
+ - C
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: str
+ enum: [A, B, C, D, E]
diff --git a/tests/files/success/test_type_float.yaml b/tests/files/success/test_type_float.yaml
new file mode 100644
index 0000000..ffd1bba
--- /dev/null
+++ b/tests/files/success/test_type_float.yaml
@@ -0,0 +1,43 @@
+---
+name: float1
+desc: Test simplest float value
+data: 3.14159
+schema:
+ type: float
+---
+name: float2
+desc: Test float value inside list
+data:
+ - 1
+ - 3.14159
+schema:
+ type: seq
+ sequence:
+ - type: float
+---
+name: float3
+desc: Test float value in mapping
+data:
+ foo: 3.14159
+ bar: 1
+schema:
+ type: map
+ mapping:
+ foo:
+ type: float
+ bar:
+ type: float
+---
+name: float4
+desc: Test float inside nested map & seq
+data:
+ foo:
+ - 1
+ - 3.14159
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: float
diff --git a/tests/files/success/test_type_int.yaml b/tests/files/success/test_type_int.yaml
new file mode 100644
index 0000000..93dd805
--- /dev/null
+++ b/tests/files/success/test_type_int.yaml
@@ -0,0 +1,38 @@
+---
+name: int1
+desc: Test simplest int value
+data: 1
+schema:
+ type: int
+---
+name: int2
+desc: Test int value inside list
+data:
+ - 1
+schema:
+ type: seq
+ sequence:
+ - type: int
+---
+name: int3
+desc: Test int value in mapping
+data:
+ foo: 1
+schema:
+ type: map
+ mapping:
+ foo:
+ type: int
+---
+name: int4
+desc: Test int inside nested map & seq
+data:
+ foo:
+ - 1
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: int
diff --git a/tests/files/success/test_type_map.yaml b/tests/files/success/test_type_map.yaml
new file mode 100644
index 0000000..010b0e9
--- /dev/null
+++ b/tests/files/success/test_type_map.yaml
@@ -0,0 +1,39 @@
+---
+name: type-map-1
+desc: Test the most basic case for map
+data:
+ foo: bar
+schema:
+ type: map
+ mapping:
+ foo:
+ type: str
+---
+name: type-map-2
+desc: Test that map works inside a sequence
+data:
+ - foo: bar
+ - foo: bar
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ foo:
+ type: str
+---
+name: type-map-3
+desc: Test map inside nested map & seq
+data:
+ foo:
+ - bar: foobar
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ bar:
+ type: str
diff --git a/tests/files/success/test_type_none.yaml b/tests/files/success/test_type_none.yaml
new file mode 100644
index 0000000..db65652
--- /dev/null
+++ b/tests/files/success/test_type_none.yaml
@@ -0,0 +1,47 @@
+#
+# NOTE: This case is not allowed because the Core class does NOT allow
+# validating when there is no data. This happens if None is at the top level
+# of the data structure.
+#
+# ---
+# name: type-none-1
+# desc: Most basic test for type None
+# data: ~
+# schema:
+# type: none
+---
+name: type-none-2
+desc: Test that none type works as a value in a map
+data:
+ name: ~
+schema:
+ type: map
+ mapping:
+ name:
+ type: none
+---
+name: type-none-3
+desc: Test that none type works as value in sequence
+data:
+ - ~
+ - ~
+schema:
+ type: seq
+ sequence:
+ - type: none
+---
+name: type-none-4
+desc: Test that none type works inside nested map, seq, map
+data:
+ foo:
+ - bar: ~
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ bar:
+ type: none
diff --git a/tests/files/success/test_type_number.yaml b/tests/files/success/test_type_number.yaml
new file mode 100644
index 0000000..f0b5cb4
--- /dev/null
+++ b/tests/files/success/test_type_number.yaml
@@ -0,0 +1,76 @@
+---
+name: type-number-1
+desc: Most basic test for type number
+data: '1337.0'
+schema:
+ type: number
+---
+name: type-number-2
+desc: Test that number type works as a value in a map
+data:
+ foo: 1337
+ bar: 3.14159
+ qwe: 0.0
+ rty: '1337'
+ ewq: '3.14159'
+ dsa: '0.0'
+schema:
+ type: map
+ mapping:
+ foo:
+ type: number
+ bar:
+ type: number
+ qwe:
+ type: number
+ rty:
+ type: number
+ ewq:
+ type: number
+ dsa:
+ type: number
+---
+name: type-number-3
+desc: Test that different number values work as values in a seq
+data:
+ - 1337
+ - 3.14159
+ - 0.0
+ - '1337'
+ - '3.14159'
+ - '0.0'
+schema:
+ type: seq
+ sequence:
+ - type: number
+---
+name: type-number-4
+desc: Test that number type works inside nested map, seq, map
+data:
+ foobar:
+ - foo: 1337
+ bar: 3.14159
+ qwe: 0.0
+ rty: '1337'
+ ewq: '3.14159'
+ dsa: '0.0'
+schema:
+ type: map
+ mapping:
+ foobar:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ foo:
+ type: number
+ bar:
+ type: number
+ qwe:
+ type: number
+ rty:
+ type: number
+ ewq:
+ type: number
+ dsa:
+ type: number
diff --git a/tests/files/success/test_type_scalar.yaml b/tests/files/success/test_type_scalar.yaml
new file mode 100644
index 0000000..da34401
--- /dev/null
+++ b/tests/files/success/test_type_scalar.yaml
@@ -0,0 +1,76 @@
+---
+name: type-scalar-1
+desc: Most basic test for type scalar
+data: '1337.0'
+schema:
+ type: scalar
+---
+name: type-scalar-2
+desc: Test that scalar type works as a value in a map
+data:
+ foo: 1337
+ bar: 3.14159
+ qwe: True
+ rty: '1337'
+ ewq: '3.14159'
+ dsa: '0.0'
+schema:
+ type: map
+ mapping:
+ foo:
+ type: scalar
+ bar:
+ type: scalar
+ qwe:
+ type: scalar
+ rty:
+ type: scalar
+ ewq:
+ type: scalar
+ dsa:
+ type: scalar
+---
+name: type-scalar-3
+desc: Test that different scalar values work as values in a seq
+data:
+ - 1337
+ - 3.14159
+ - True
+ - '1337'
+ - '3.14159'
+ - '0.0'
+schema:
+ type: seq
+ sequence:
+ - type: scalar
+---
+name: type-scalar-4
+desc: Test that scalar type works inside nested map, seq, map
+data:
+ foobar:
+ - foo: 1337
+ bar: 3.14159
+ qwe: True
+ rty: '1337'
+ ewq: '3.14159'
+ dsa: '0.0'
+schema:
+ type: map
+ mapping:
+ foobar:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ foo:
+ type: scalar
+ bar:
+ type: scalar
+ qwe:
+ type: scalar
+ rty:
+ type: scalar
+ ewq:
+ type: scalar
+ dsa:
+ type: scalar
diff --git a/tests/files/success/test_type_seq.yaml b/tests/files/success/test_type_seq.yaml
new file mode 100644
index 0000000..5eb1b16
--- /dev/null
+++ b/tests/files/success/test_type_seq.yaml
@@ -0,0 +1,36 @@
+---
+name: type-seq-1
+desc: Test the most basic case for seq
+data:
+ - foo
+schema:
+ type: seq
+ sequence:
+ - type: str
+---
+name: type-seq-2
+desc: Test that seq in seq works
+data:
+ - - True
+ - - False
+schema:
+ type: seq
+ sequence:
+ - type: seq
+ sequence:
+ - type: bool
+---
+name: type-seq-3
+desc: Test bool inside nested map & seq
+data:
+ - foo:
+ - True
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: bool
diff --git a/tests/files/success/test_type_str.yaml b/tests/files/success/test_type_str.yaml
new file mode 100644
index 0000000..e5717c9
--- /dev/null
+++ b/tests/files/success/test_type_str.yaml
@@ -0,0 +1,56 @@
+---
+name: str1
+desc: Test simplest str value
+data: "foobar"
+schema:
+ type: str
+---
+name: str2
+desc: Test str value inside list
+data:
+ - 'foo'
+ - bar
+schema:
+ type: seq
+ sequence:
+ - type: str
+---
+name: str3
+desc: Test str value in mapping
+data:
+ foo: 'foobar'
+schema:
+ type: map
+ mapping:
+ foo:
+ type: str
+---
+name: str4
+desc: Test str inside nested map & seq
+data:
+ foo:
+ - 'foo'
+schema:
+ type: map
+ mapping:
+ foo:
+ type: seq
+ sequence:
+ - type: str
+---
+name: deftype1
+desc: Default type test. Because str is the default type, this test lives in this file.
+#
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "name":
+ "email":
+#
+data:
+ - name: foo
+ email: foo@mail.com
+ - name: bar
+ - email: baz@mail.com
diff --git a/tests/files/success/test_type_symbol.yaml b/tests/files/success/test_type_symbol.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/files/success/test_type_symbol.yaml
diff --git a/tests/files/success/test_type_text.yaml b/tests/files/success/test_type_text.yaml
new file mode 100644
index 0000000..7f218e2
--- /dev/null
+++ b/tests/files/success/test_type_text.yaml
@@ -0,0 +1,75 @@
+---
+name: type-text-1
+desc: Test simplest text type
+data: "foobar"
+schema:
+ type: text
+---
+name: type-text-2
+desc: Test possible values as values in seq
+data:
+ - 'abc'
+ - '1337'
+ - '3.14159'
+ - 1337
+ - 3.14159
+schema:
+ type: seq
+ sequence:
+ - type: text
+---
+name: type-text-3
+desc: Test possible values as values in map
+data:
+ foo: 1337
+ bar: 3.14159
+ qwe: 'abc'
+ rty: '1337'
+ ewq: '3.14159'
+ dsa: '0.0'
+schema:
+ type: map
+ mapping:
+ foo:
+ type: text
+ bar:
+ type: text
+ qwe:
+ type: text
+ rty:
+ type: text
+ ewq:
+ type: text
+ dsa:
+ type: text
+---
+name: type-text-4
+desc: Test that text type works inside nested map, seq, map
+data:
+ foobar:
+ - foo: 1337
+ bar: 3.14159
+ qwe: 'abc'
+ rty: '1337'
+ ewq: '3.14159'
+ dsa: '0.0'
+schema:
+ type: map
+ mapping:
+ foobar:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ foo:
+ type: text
+ bar:
+ type: text
+ qwe:
+ type: text
+ rty:
+ type: text
+ ewq:
+ type: text
+ dsa:
+ type: text
diff --git a/tests/files/success/test_type_timestamp.yaml b/tests/files/success/test_type_timestamp.yaml
new file mode 100644
index 0000000..5bcfbcc
--- /dev/null
+++ b/tests/files/success/test_type_timestamp.yaml
@@ -0,0 +1,37 @@
+---
+name: type-timestamp-1
+desc: Most basic timestamp test
+data: "2015-03-29T18:45:00+00:00"
+schema:
+ type: timestamp
+---
+name: type-timestamp-2
+desc: Test timestamps as values in seq
+data:
+ - "2015-03-29T18:45:00+00:00"
+ - "2015-03-29T18:45:00"
+ - "2015-03-29T11:45:00 -0700"
+ - "2015-03-29"
+schema:
+ type: seq
+ sequence:
+ - type: timestamp
+---
+name: type-timestamp-3
+desc: Basic test of different types of timestamps
+data:
+ d1: "2015-03-29T18:45:00+00:00"
+ d2: "2015-03-29T18:45:00"
+ d3: "2015-03-29T11:45:00 -0700"
+ d4: "2015-03-29"
+schema:
+ type: map
+ mapping:
+ d1:
+ type: timestamp
+ d2:
+ type: timestamp
+ d3:
+ type: timestamp
+ d4:
+ type: timestamp
diff --git a/tests/files/success/test_unique.yaml b/tests/files/success/test_unique.yaml
new file mode 100644
index 0000000..20cb64f
--- /dev/null
+++ b/tests/files/success/test_unique.yaml
@@ -0,0 +1,138 @@
+---
+name: unique1
+desc: unique constraint test with map
+#
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "name":
+ unique: true
+ "age":
+ type: int
+#
+data:
+ - name: foo
+ age: 10
+ - name: bar
+ age: 10
+ - name: baz
+ age: 10
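+# Only 'name' carries the unique constraint, so the repeated 'age' values
+# above are still valid.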
+---
+name: unique2
+desc: unique constraint test with seq
+#
+schema:
+ type: seq
+ sequence:
+ - type: str
+ unique: true
+#
+data:
+ - foo
+ - ~
+ - bar
+ - ~
+ - baz
+---
+name: unique3
+desc: unique constraint and '<<' (merge)
+#
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "name":
+ type: str
+ required: True
+ unique: true
+ "value":
+ type: any
+ required: True
+#
+data:
+ # arbitrary values with no particular meaning
+ - name: x1
+ value: 10
+ - name: x2
+ value: 20
+---
+name: unique4
+desc: unique constraint and anchor
+#
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "name":
+ type: str
+ "value":
+ type: any
+#
+data:
+ # arbitrary values with no particular meaning
+ - name: x1
+ value: 10
+ - name: x2
+ value: 20
+---
+name: unique5
+desc: unique constraint in nested data structures
+data:
+ - name: foo
+ email: admin@mail.com
+ groups:
+ - users
+ - foo
+ - admin
+ - name: bar
+ email: admin@mail.com
+ groups:
+ - users
+ - admin
+ - name: baz
+ email: baz@mail.com
+ groups:
+ - users
+schema:
+ type: seq
+ sequence:
+ - type: map
+ required: True
+ mapping:
+ name:
+ type: str
+ required: True
+ unique: True
+ email:
+ type: str
+ groups:
+ type: seq
+ sequence:
+ - type: str
+ unique: True
+---
+name: unique6
+desc: Test that unique does not fail when the key it tries to look up is missing
+data:
+ - xref: 'GOC:hm'
+ uri: 'GOC:hm'
+ - uri: 'http://orcid.org/0000-0002-4862-3181'
+schema:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ "uri":
+ type: str
+ required: true
+ unique: true
+ pattern: ^((ht|f)tp(s?)\:\/\/\w[\/\.\-\:\w]+)|(GOC\:[\w\_]+)$
+ "xref":
+ type: str
+ required: false
+ unique: true
+ pattern: ^\w+:\w+$
diff --git a/tests/files/success/test_version.yaml b/tests/files/success/test_version.yaml
new file mode 100644
index 0000000..32621ba
--- /dev/null
+++ b/tests/files/success/test_version.yaml
@@ -0,0 +1,5 @@
+---
+data: foo
+schema:
+ version: 1.0.0
+ type: str
diff --git a/tests/files/unicode/1f.yaml b/tests/files/unicode/1f.yaml
new file mode 100644
index 0000000..97d51f5
--- /dev/null
+++ b/tests/files/unicode/1f.yaml
@@ -0,0 +1,13 @@
+schema:
+ type: map
+ mapping:
+ msg:
+ type: int
+ Alô:
+ type: int
+data:
+ msg: "Alô do Brasil!!"
+ Alô: "Brasil!!"
+errors:
+ - "Value 'Alô do Brasil!!' is not of type 'int'. Path: '/msg'"
+ - "Value 'Brasil!!' is not of type 'int'. Path: '/Alô'"
diff --git a/tests/files/unicode/1s.yaml b/tests/files/unicode/1s.yaml
new file mode 100644
index 0000000..ab9fff9
--- /dev/null
+++ b/tests/files/unicode/1s.yaml
@@ -0,0 +1,10 @@
+schema:
+ type: map
+ mapping:
+ msg:
+ type: str
+ Alô:
+ type: str
+data:
+ msg: "Alô do Brasil!!"
+ Alô: "Brasil!!"
diff --git a/tests/files/unicode/3f.yaml b/tests/files/unicode/3f.yaml
new file mode 100644
index 0000000..a963f1b
--- /dev/null
+++ b/tests/files/unicode/3f.yaml
@@ -0,0 +1,10 @@
+schema:
+ type: seq
+ sequence:
+ - type: int
+data:
+ - foobar
+ - åäö
+errors:
+ - "Value 'foobar' is not of type 'int'. Path: '/0'"
+ - "Value 'åäö' is not of type 'int'. Path: '/1'"
diff --git a/tests/files/unicode/3s.yaml b/tests/files/unicode/3s.yaml
new file mode 100644
index 0000000..3ab54de
--- /dev/null
+++ b/tests/files/unicode/3s.yaml
@@ -0,0 +1,7 @@
+schema:
+ type: seq
+ sequence:
+ - type: str
+data:
+ - foobar
+ - åäö
diff --git a/tests/test_cli.py b/tests/test_cli.py
new file mode 100644
index 0000000..33da698
--- /dev/null
+++ b/tests/test_cli.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# python std lib
+import os
+import sys
+
+# pykwalify package imports
+from pykwalify import cli
+
+
+class TestCLI(object):
+
+ def test_cli(self, tmpdir):
+ """
+ Test that when passing certain arguments from the command line they
+ are handled correctly by docopt and the correct args structure is returned.
+ """
+ input = tmpdir.join("cli/1a.yaml")
+ schema_file = tmpdir.join("cli/1b.yaml")
+
+ sys.argv = [
+ 'scripts/pykwalify',
+ '-d', str(input),
+ '-s', str(schema_file),
+ '-v'
+ ]
+
+ expected = {
+ '--data-file': str(input),
+ '--schema-file': [str(schema_file)],
+ '--quiet': False,
+ '--verbose': 1,
+ }
+
+ cli_args = cli.parse_cli()
+
+ for k, v in expected.items():
+ assert k in cli_args
+ assert cli_args[k] == expected[k]
+
+ def f(self, *args):
+ """
+ Returns abs path to test files inside tests/files/
+ """
+ return os.path.join(os.path.dirname(os.path.realpath(__file__)), "files", *args)
+
+ def test_run_cli(self):
+ """
+ This should test that running the cli still works as expected
+ """
+ input = self.f("cli/1a.yaml")
+ schema_file = self.f("cli/1b.yaml")
+
+ sys.argv = [
+ 'scripts/pykwalify',
+ '-d', str(input),
+ '-s', str(schema_file),
+ ]
+
+ cli_args = cli.parse_cli()
+ c = cli.run(cli_args)
+ assert c.validation_errors == []
diff --git a/tests/test_core.py b/tests/test_core.py
new file mode 100644
index 0000000..6fcd81e
--- /dev/null
+++ b/tests/test_core.py
@@ -0,0 +1,572 @@
+# -*- coding: utf-8 -*-
+
+""" Unit test for pyKwalify - Core """
+
+# python std lib
+import os
+
+# pykwalify imports
+import pykwalify
+from pykwalify.core import Core
+from pykwalify.errors import SchemaError, CoreError
+
+# 3rd party imports
+import pytest
+from pykwalify.compat import yaml
+from testfixtures import compare
+
+
+class TestCore(object):
+
+ def setUp(self):
+ pykwalify.partial_schemas = {}
+
+ def f(self, *args):
+ return os.path.join(os.path.dirname(os.path.realpath(__file__)), "files", *args)
+
+ def test_create_empty_core_object(self, tmpdir):
+ """
+ If creating a core object without any source or schema file, an exception should be raised.
+ """
+ with pytest.raises(CoreError) as ex:
+ Core()
+ assert "No source file/data was loaded" in str(ex.value)
+
+ # To trigger schema exception we must pass in a source file
+ source_f = tmpdir.join("bar.json")
+ source_f.write("3.14159")
+
+ with pytest.raises(CoreError) as ex:
+ Core(source_file=str(source_f))
+ assert "No schema file/data was loaded" in str(ex.value)
+
+ def test_load_non_existing_file(self):
+ file_to_load = "/tmp/foo/bar/barfoo"
+ assert not os.path.exists(file_to_load), "The following file must not exist on your system while running these tests : {0}".format(file_to_load)
+ with pytest.raises(CoreError) as ex:
+ Core(source_file=file_to_load)
+ assert "Provided source_file do not exists on disk" in str(ex.value)
+
+ def test_load_non_existing_schema_file(self):
+ """
+ An exception should be raised if the specified schema file does not exist on disk.
+ """
+ file_to_load = "/tmp/foo/bar/barfoo"
+ assert not os.path.exists(file_to_load), "The following file must not exist on your system while running these tests : {0}".format(file_to_load)
+ with pytest.raises(CoreError) as ex:
+ Core(schema_files=[file_to_load])
+ assert "Provided source_file do not exists on disk" in str(ex.value)
+
+ def test_load_wrong_schema_files_type(self):
+ """
+ It should only be possible to send in a list type as 'schema_files' object
+ """
+ with pytest.raises(CoreError) as ex:
+ Core(source_file=None, schema_files={})
+ assert "schema_files must be of list type" in str(ex.value)
+
+ def test_load_json_file(self, tmpdir):
+ """
+ Load source & schema files that have a json file ending.
+ """
+ source_f = tmpdir.join("bar.json")
+ source_f.write("3.14159")
+
+ schema_f = tmpdir.join("foo.json")
+ schema_f.write('{"type": "float"}')
+
+ Core(source_file=str(source_f), schema_files=[str(schema_f)])
+
+ # TODO: Try to load a non existing json file
+
+ def test_load_yaml_files(self, tmpdir):
+ """
+ Load source & schema files that have a yaml file ending.
+ """
+ source_f = tmpdir.join("foo.yaml")
+ source_f.write("3.14159")
+
+ schema_f = tmpdir.join("bar.yaml")
+ schema_f.write("type: float")
+
+ Core(source_file=str(source_f), schema_files=[str(schema_f)])
+
+ def test_load_unsupported_format(self, tmpdir):
+ """
+ Try to load a file ending that is not supported.
+ Currently XML is not supported.
+ """
+ source_f = tmpdir.join("foo.xml")
+ source_f.write("<foo>bar</foo>")
+
+ schema_f = tmpdir.join("bar.xml")
+ schema_f.write("<foo>bar</foo>")
+
+ with pytest.raises(CoreError) as ex:
+ Core(source_file=str(source_f))
+ assert "Unable to load source_file. Unknown file format of specified file path" in str(ex.value)
+
+ with pytest.raises(CoreError) as ex:
+ Core(schema_files=[str(schema_f)])
+ assert "Unknown file format. Supported file endings is" in str(ex.value)
+
+ def test_load_empty_json_file(self, tmpdir):
+ """
+ Loading empty json files should raise an exception
+ """
+ # Load empty source file
+ source_f = tmpdir.join("foo.json")
+ source_f.write("")
+
+ schema_f = tmpdir.join("bar.json")
+ schema_f.write("")
+
+ with pytest.raises(ValueError) as ex:
+ Core(source_file=str(source_f), schema_files=[str(schema_f)])
+ # Python 2.7 and Python 3.5 JSON parsers return different exception
+ # strings for the same data file, so check for both errors strings.
+ assert ("No JSON object could be decoded" in str(ex.value) or
+ "Expecting value:" in str(ex.value))
+
+ # Load empty schema files
+ source_f = tmpdir.join("foo.json")
+ source_f.write("3.14159")
+
+ schema_f = tmpdir.join("bar.json")
+ schema_f.write("")
+
+ with pytest.raises(ValueError) as ex:
+ Core(source_file=str(source_f), schema_files=[str(schema_f)])
+ assert ("No JSON object could be decoded" in str(ex.value) or
+ "Expecting value:" in str(ex.value))
+
+ def test_load_empty_yaml_file(self, tmpdir):
+ """
+ Loading empty yaml files should raise an exception
+ """
+ # Load empty source file
+ source_f = tmpdir.join("foo.yaml")
+ source_f.write("")
+
+ schema_f = tmpdir.join("bar.yaml")
+ schema_f.write("")
+
+ # TODO: This is a bit buggy because the wrong exception is raised...
+ with pytest.raises(CoreError) as ex:
+ Core(source_file=str(source_f), schema_files=[str(schema_f)])
+ # assert "Unable to load any data from source yaml file" in str(ex.value)
+
+ # Load empty schema files
+ source_f = tmpdir.join("foo.yaml")
+ source_f.write("3.14159")
+
+ schema_f = tmpdir.join("bar.yaml")
+ schema_f.write("")
+
+ with pytest.raises(CoreError) as ex:
+ Core(source_file=str(source_f), schema_files=[str(schema_f)])
+ assert "No data loaded from file" in str(ex.value)
+
+ def test_validation_error_but_not_raise_exception(self):
+ """
+ Test that if 'raise_exception=False' is passed when validating, no exception is raised.
+
+ Currently files 2a.yaml & 2b.yaml are designed to cause an exception.
+ """
+ c = Core(source_file=self.f("cli", "2a.yaml"), schema_files=[self.f("cli", "2b.yaml")])
+ c.validate(raise_exception=False)
+
+ assert c.validation_errors == [
+ "Value '1' is not of type 'str'. Path: '/0'", "Value '2' is not of type 'str'. Path: '/1'", "Value '3' is not of type 'str'. Path: '/2'"
+ ]
+
+ # TODO: Fix this issue...
+ # assert ('pykwalify.core', 'ERROR', 'Errors found but will not raise exception...') in l.actual()
+
+ def test_core_data_mode(self):
+ Core(source_data=3.14159, schema_data={"type": "number"}).validate()
+ Core(source_data="1e-06", schema_data={"type": "float"}).validate()
+ Core(source_data=3.14159, schema_data={"type": "float"}).validate()
+ Core(source_data=3, schema_data={"type": "float"}).validate()
+ Core(source_data=3, schema_data={"type": "int"}).validate()
+ Core(source_data=True, schema_data={"type": "bool"}).validate()
+ Core(source_data="foobar", schema_data={"type": "str"}).validate()
+ Core(source_data="foobar", schema_data={"type": "text"}).validate()
+ Core(source_data="foobar", schema_data={"type": "any"}).validate()
+
+ # Test that 'any' allows types that are not even implemented
+ def foo():
+ pass
+ Core(source_data=foo, schema_data={"type": "any"}).validate()
+ Core(source_data=lambda x: x, schema_data={"type": "any"}).validate()
+
+ with pytest.raises(SchemaError):
+ Core(source_data="1z-06", schema_data={"type": "float"}).validate()
+
+ with pytest.raises(SchemaError):
+ Core(source_data="abc", schema_data={"type": "number"}).validate()
+
+ with pytest.raises(SchemaError):
+ Core(source_data=3.14159, schema_data={"type": "int"}).validate()
+
+ with pytest.raises(SchemaError):
+ Core(source_data=1337, schema_data={"type": "bool"}).validate()
+
+ with pytest.raises(SchemaError):
+ Core(source_data=1, schema_data={"type": "str"}).validate()
+
+ with pytest.raises(SchemaError):
+ Core(source_data=True, schema_data={"type": "text"}).validate()
+
+ def test_multi_file_support(self):
+ """
+ This should test that multiple files are supported correctly
+ """
+ pass_tests = [
+ # Test that include directive can be used at top level of the schema
+ (
+ [
+ self.f("partial_schemas", "1s-schema.yaml"),
+ self.f("partial_schemas", "1s-partials.yaml"),
+ ],
+ self.f("partial_schemas", "1s-data.yaml"),
+ {
+ 'sequence': [{'include': 'fooone'}],
+ 'type': 'seq',
+ }
+ ),
+ # # This tests that the include directive works inside a sequence
+ # ([self.f("33a.yaml"), self.f("33b.yaml")], self.f("33c.yaml"), {'sequence': [{'include': 'fooone'}], 'type': 'seq'}),
+ # This tests recursive schemas
+ (
+ [
+ self.f("partial_schemas", "2s-schema.yaml"),
+ self.f("partial_schemas", "2s-partials.yaml"),
+ ],
+ self.f("partial_schemas", "2s-data.yaml"),
+ {
+ 'sequence': [{'include': 'fooone'}],
+ 'type': 'seq',
+ }
+ ),
+ # This tests that you can include a partial schema alongside other rules in a map
+ (
+ [
+ self.f("partial_schemas", "7s-schema.yaml"),
+ ],
+ self.f("partial_schemas", "7s-data.yaml"),
+ {
+ 'type': 'map',
+ 'mapping': {
+ 'foo': {
+ 'type': 'str',
+ 'required': True
+ },
+ 'bar': {
+ 'include': 'bar'
+ }
+ }
+ }
+ )
+ ]
+
+ failing_tests = [
+ # Test include inside partial schema
+ (
+ [
+ self.f("partial_schemas", "1f-schema.yaml"),
+ self.f("partial_schemas", "1f-partials.yaml")
+ ],
+ self.f("partial_schemas", "1f-data.yaml"),
+ SchemaError,
+ ["Cannot find partial schema with name 'fooonez'. Existing partial schemas: 'bar, fooone, foothree, footwo'. Path: '/0'"]
+ ),
+ (
+ [
+ self.f('partial_schemas', '2f-schema.yaml')
+ ],
+ self.f('partial_schemas', '2f-data.yaml'),
+ SchemaError,
+ ["Value 'True' is not of type 'str'. Path: '/0'"]
+ ),
+ (
+ [
+ self.f('partial_schemas', '3f-schema.yaml')
+ ],
+ self.f('partial_schemas', '3f-data.yaml'),
+ SchemaError,
+ ["Value 'True' is not of type 'str'. Path: ''"]
+ ),
+ (
+ [
+ self.f('partial_schemas', '4f-schema.yaml')
+ ],
+ self.f('partial_schemas', '4f-data.yaml'),
+ SchemaError,
+ ["Value 'True' is not of type 'str'. Path: '/0/foo/0/bar'"]
+ ),
+ (
+ [
+ self.f('partial_schemas', '5f-schema.yaml')
+ ],
+ self.f('partial_schemas', '5f-data.yaml'),
+ SchemaError,
+ ["Value 'True' is not of type 'str'. Path: '/0/0/0/0'"]
+ ),
+ (
+ [
+ self.f('partial_schemas', '6f-schema.yaml')
+ ],
+ self.f('partial_schemas', '6f-data.yaml'),
+ SchemaError,
+ ["Value 'True' is not of type 'str'. Path: '/foo/bar/qwe/ewq'"]
+ )
+ ]
+
+ for passing_test in pass_tests:
+ try:
+ c = Core(source_file=passing_test[1], schema_files=passing_test[0])
+ c.validate()
+ compare(c.validation_errors, [], prefix="No validation errors should exist...")
+ except Exception as e:
+ print("ERROR RUNNING FILE: {0} : {1}".format(passing_test[0], passing_test[1]))
+ raise e
+
+ # This serves as extra schema validation that tests more complex structures than testrule.py does
+ compare(c.root_rule.schema_str, passing_test[2], prefix="Parsed rules are not correct, something has changed...")
+
+ for failing_test in failing_tests:
+ with pytest.raises(failing_test[2], message="Test files: {0} : {1}".format(", ".join(failing_test[0]), failing_test[1])):
+ c = Core(schema_files=failing_test[0], source_file=failing_test[1])
+ c.validate()
+
+ if not c.validation_errors:
+ raise AssertionError("No validation_errors was raised...")
+
+ compare(
+ sorted(c.validation_errors),
+ sorted(failing_test[3]),
+ prefix="Wrong validation errors when parsing files : {0} : {1}".format(
+ failing_test[0],
+ failing_test[1],
+ ),
+ )
+
+ def test_core_files(self):
+ # These tests should pass with no exception raised
+ pass_tests = [
+ # All tests for keyword assert
+ "test_assert.yaml",
+ # All tests for keyword default
+ "test_default.yaml",
+ # All tests for keyword desc
+ "test_desc.yaml",
+ # All tests for keyword enum
+ "test_enum.yaml",
+ # All tests for keyword example
+ "test_example.yaml",
+ # All tests for keyword extensions
+ "test_extensions.yaml",
+ # All tests for keyword func
+ "test_func.yaml",
+ # All tests for keyword ident
+ "test_ident.yaml",
+ # All tests for keyword include
+ "test_include.yaml",
+ # All tests for keyword length
+ "test_length.yaml",
+ # All tests for keyword mapping
+ "test_mapping.yaml",
+ # All tests for keyword matching
+ "test_matching.yaml",
+ # All tests for keyword name
+ "test_name.yaml",
+ # All tests for keyword nullable
+ "test_nullable.yaml",
+ # All tests for keyword pattern
+ "test_pattern.yaml",
+ # All tests for keyword range
+ "test_range.yaml",
+ # All tests for keyword required
+ "test_required.yaml",
+ # All tests for keyword schema
+ "test_schema.yaml",
+ # All tests for keyword sequence
+ "test_sequence.yaml",
+ # All tests for keyword unique
+ "test_unique.yaml",
+ # All tests for keyword version
+ "test_version.yaml",
+
+ # All test cases for Multiple sequence checks
+ "test_sequence_multi.yaml",
+ # All test cases for merging
+ "test_merge.yaml",
+ # All test cases for yaml anchors
+ "test_anchor.yaml",
+
+ # All tests for TYPE: any
+ "test_type_any.yaml",
+ # All tests for TYPE: bool
+ "test_type_bool.yaml",
+ # All tests for TYPE: date
+ "test_type_date.yaml",
+ # All tests for TYPE: enum
+ "test_type_enum.yaml",
+ # All tests for TYPE: float
+ "test_type_float.yaml",
+ # All tests for TYPE: int
+ "test_type_int.yaml",
+ # All tests for TYPE: map
+ "test_type_map.yaml",
+ # All tests for TYPE: none
+ "test_type_none.yaml",
+ # All tests for TYPE: number
+ "test_type_number.yaml",
+ # All tests for TYPE: scalar
+ "test_type_scalar.yaml",
+ # All tests for TYPE: seq
+ "test_type_seq.yaml",
+ # All tests for TYPE: str
+ "test_type_str.yaml",
+ # All tests for TYPE: symbol
+ "test_type_symbol.yaml",
+ # All tests for TYPE: text
+ "test_type_text.yaml",
+ # All tests for TYPE: timestamp
+ "test_type_timestamp.yaml",
+ ]
+
+ _fail_tests = [
+ # All tests for keyword assert
+ ("test_assert.yaml", SchemaError),
+ # All tests for keyword default
+ ("test_default.yaml", SchemaError),
+ # All tests for keyword desc
+ ("test_desc.yaml", SchemaError),
+ # All tests for keyword enum
+ ("test_enum.yaml", SchemaError),
+ # All tests for keyword example
+ ("test_example.yaml", SchemaError),
+ # All tests for keyword extensions
+ ("test_extensions.yaml", SchemaError),
+ # All tests for keyword func
+ ("test_func.yaml", SchemaError),
+ # All tests for keyword ident
+ ("test_ident.yaml", SchemaError),
+ # All tests for keyword include
+ ("test_include.yaml", SchemaError),
+ # All tests for keyword length
+ ("test_length.yaml", SchemaError),
+ # All tests for keyword mapping
+ ("test_mapping.yaml", SchemaError),
+ # All tests for keyword matching
+ ("test_matching.yaml", SchemaError),
+ # All tests for keyword name
+ ("test_name.yaml", SchemaError),
+ # All tests for keyword nullable
+ ("test_nullable.yaml", SchemaError),
+ # All tests for keyword pattern
+ ("test_pattern.yaml", SchemaError),
+ # All tests for keyword range
+ ("test_range.yaml", SchemaError),
+ # All tests for keyword required
+ ("test_required.yaml", SchemaError),
+ # All tests for keyword schema
+ ("test_schema.yaml", SchemaError),
+ # All tests for keyword sequence
+ ("test_sequence.yaml", SchemaError),
+ # All tests for keyword unique
+ ("test_unique.yaml", SchemaError),
+ # All tests for keyword version
+ ("test_version.yaml", SchemaError),
+
+ # All test cases for Multiple sequence checks
+ ("test_sequence_multi.yaml", SchemaError),
+ # All test cases for merging
+ ("test_merge.yaml", SchemaError),
+ # All test cases for yaml anchors
+ ("test_anchor.yaml", SchemaError),
+
+ # All tests for TYPE: any
+ ("test_type_any.yaml", SchemaError),
+ # All tests for TYPE: bool
+ ("test_type_bool.yaml", SchemaError),
+ # All tests for TYPE: date
+ ("test_type_date.yaml", SchemaError),
+ # All tests for TYPE: float
+ ("test_type_float.yaml", SchemaError),
+ # All tests for TYPE: int
+ ("test_type_int.yaml", SchemaError),
+ # All tests for TYPE: map
+ ("test_type_map.yaml", SchemaError),
+ # All tests for TYPE: none
+ ("test_type_none.yaml", SchemaError),
+ # All tests for TYPE: number
+ ("test_type_number.yaml", SchemaError),
+ # All tests for TYPE: scalar
+ ("test_type_scalar.yaml", SchemaError),
+ # All tests for TYPE: seq
+ ("test_type_seq.yaml", SchemaError),
+ # All tests for TYPE: str
+ ("test_type_str.yaml", SchemaError),
+ # All tests for TYPE: symbol
+ ("test_type_symbol.yaml", SchemaError),
+ # All tests for TYPE: text
+ ("test_type_text.yaml", SchemaError),
+ # All tests for TYPE: timestamp
+ ("test_type_timestamp.yaml", SchemaError),
+ ]
+
+ # Add override magic to make it easier to test a specific file
+ if "S" in os.environ:
+ pass_tests = [os.environ["S"]]
+ _fail_tests = []
+ elif "F" in os.environ:
+ pass_tests = []
+ _fail_tests = [(os.environ["F"], SchemaError)]
+
+ for passing_test_file in pass_tests:
+ f = self.f(os.path.join("success", passing_test_file))
+ with open(f, "r") as stream:
+ yaml_data = yaml.safe_load_all(stream)
+
+ for document_index, document in enumerate(yaml_data):
+ data = document["data"]
+ schema = document["schema"]
+
+ try:
+ print("Running test files: {0}".format(f))
+ c = Core(source_data=data, schema_data=schema, strict_rule_validation=True, allow_assertions=True)
+ c.validate()
+ compare(c.validation_errors, [], prefix="No validation errors should exist...")
+ except Exception as e:
+ print("ERROR RUNNING FILES: {0} : {1}:{2}".format(f, document_index, document.get('name', 'UNKNOWN')))
+ raise e
+
+ # This serves as extra schema validation that tests more complex structures than testrule.py does
+ compare(c.root_rule.schema_str, schema, prefix="Parsed rules are not correct, something has changed... files : {0} : {1}".format(f, document_index))
+
+ for failing_test, exception_type in _fail_tests:
+ f = self.f(os.path.join("fail", failing_test))
+ with open(f, "r") as stream:
+ yaml_data = yaml.safe_load_all(stream)
+
+ for document_index, document in enumerate(yaml_data):
+ data = document["data"]
+ schema = document["schema"]
+ errors = document.get("errors", [])
+
+ try:
+ print("Running test files: {0}".format(f))
+ c = Core(source_data=data, schema_data=schema, strict_rule_validation=True, allow_assertions=True)
+ c.validate()
+ except exception_type as e:
+ pass
+ else:
+ print("ERROR RUNNING FILES: {0} : {1}:{2}".format(f, document_index, document.get('name', 'UNKNOWN')))
+ raise AssertionError("Exception {0} not raised as expected... FILES: {1} : {2} : {3}:{4}".format(
+ exception_type, exception_type, failing_test, document_index, document.get('name', 'UNKNOWN')))
+
+ compare(sorted(c.validation_errors), sorted(errors), prefix="Wrong validation errors when parsing files : {0} : {1} : {2}".format(
+ f, document_index, document.get('name', 'UNKNOWN')))
diff --git a/tests/test_core_methods.py b/tests/test_core_methods.py
new file mode 100644
index 0000000..2208b79
--- /dev/null
+++ b/tests/test_core_methods.py
@@ -0,0 +1,308 @@
+# -*- coding: utf-8 -*-
+import pytest
+from datetime import datetime
+
+from pykwalify.compat import unicode
+from pykwalify.core import Core
+# from pykwalify.errors import NotSequenceError, CoreError
+from pykwalify.errors import CoreError
+
+
+class Rule(object):
+ def __init__(self, sequence=None, mapping=None, rule_type=None):
+ self.sequence = sequence or []
+ self.mapping = mapping or {}
+ self.type = rule_type or ''
+
+
+def _remap_errors(c):
+ return [unicode(error) for error in c.errors]
+
+
+ # TODO: Refactor this because it no longer raises NotSequenceError but it now adds an error to the
+# error stack and it should look for that one instead.
+# def test_validate_sequence():
+# # If the type is set to sequence but value is int, it should raise NotSequenceError
+# with pytest.raises(NotSequenceError):
+# c = Core(source_data={}, schema_data={})
+# c._validate_sequence(123, Rule(sequence=['']), '', [])
+
+
+def ec():
+ # Return an empty core object
+ return Core(source_data={}, schema_data={})
+
+
+def test_validate_range():
+ data_matrix = [
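+ # Each row: (max, min, max-ex, min-ex, value, expected error strings)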
+ (10, 5, 10, 5, 7, []),
+ (None, None, None, None, 7, []),
+
+ (10, 5, None, None, 13, ["Type 'prefix' has size of '13', greater than max limit '10'. Path: '/'"]),
+ (10, 5, None, None, 3, ["Type 'prefix' has size of '3', less than min limit '5'. Path: '/'"]),
+ (10, 5, None, None, 13.5, ["Type 'prefix' has size of '13.5', greater than max limit '10'. Path: '/'"]),
+ (10, 5, None, None, 3.5, ["Type 'prefix' has size of '3.5', less than min limit '5'. Path: '/'"]),
+ (10, 5, None, None, 10, []),
+ (10, 5, None, None, 5, []),
+ (10, 5, None, None, 10.0, []),
+ (10, 5, None, None, 5.0, []),
+
+ (None, None, 10, 5, 13, ["Type 'prefix' has size of '13', greater than or equals to max limit(exclusive) '10'. Path: '/'"]),
+ (None, None, 10, 5, 3, ["Type 'prefix' has size of '3', less than or equals to min limit(exclusive) '5'. Path: '/'"]),
+ (None, None, 10, 5, 13.5, ["Type 'prefix' has size of '13.5', greater than or equals to max limit(exclusive) '10'. Path: '/'"]),
+ (None, None, 10, 5, 3.5, ["Type 'prefix' has size of '3.5', less than or equals to min limit(exclusive) '5'. Path: '/'"]),
+ (None, None, 10, 5, 10, ["Type 'prefix' has size of '10', greater than or equals to max limit(exclusive) '10'. Path: '/'"]),
+ (None, None, 10, 5, 5, ["Type 'prefix' has size of '5', less than or equals to min limit(exclusive) '5'. Path: '/'"]),
+ (None, None, 10, 5, 8, []),
+ (None, None, 10, 5, 7, []),
+ (None, None, 10, 5, 8.5, []),
+ (None, None, 10, 5, 7.5, []),
+ ]
+
+ for max_, min_, max_ex, min_ex, value, errors in data_matrix:
+ print(u"Testing data: {0} {1} {2} {3} {4}".format(max_, min_, max_ex, min_ex, value))
+ c = ec()
+ c._validate_range(max_, min_, max_ex, min_ex, value, '/', 'prefix')
+ assert _remap_errors(c) == errors
+
+ # Test value type validation inside the method
+ with pytest.raises(CoreError):
+ c = ec()
+ c._validate_range(5, 1, None, None, [1, 2, 3], '/', 'prefix')
+
+ with pytest.raises(CoreError):
+ c = ec()
+ c._validate_range(5, 1, None, None, {'a': 1, 'b': 2, 'c': 3}, '/', 'prefix')
+
+
+def test_validate_timestamp():
+ data_matrix = [
+ ("", ["Timestamp value is empty. Path: ''"]),
+ ("1234567", []),
+ ("2016-01-01", []),
+ ("2016-01-01 15:01", []),
+ (123, []),
+ (1.5, []),
+ (0, ["Integer value of timestamp can't be below 0"]),
+ (-1, ["Integer value of timestamp can't be below 0"]),
+ (3147483647, ["Integer value of timestamp can't be above 2147483647"]),
+ ([], ["Not a valid timestamp"]),
+ (datetime.now(), []),
+ (datetime.today(), []),
+ ]
+
+ for data in data_matrix:
+ c = ec()
+ c._validate_scalar_timestamp(data[0], '')
+ assert _remap_errors(c) == data[1]
+
+
+def test_validate_date():
+ formats = ["%Y-%m-%d"]
+
+ data_matrix = [
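+ # Each row: (value, [expected errors for the matching entry in 'formats'])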
+ (datetime.now(), [[], []]),
+ (datetime.today(), [[], []]),
+ ("1234567", [["Not a valid date: 1234567 format: %Y-%m-%d. Path: ''"], []]),
+ ("2016-01-01", [[], []]),
+ ("2016-01-01 15:01", [["Not a valid date: 2016-01-01 15:01 format: %Y-%m-%d. Path: ''"], []]),
+ (-1, [["Not a valid date: -1 date must be a string or a datetime.date not a 'int'"], []]),
+ (0, [["Not a valid date: 0 date must be a string or a datetime.date not a 'int'"], []]),
+ (1.5, [["Not a valid date: 1.5 date must be a string or a datetime.date not a 'float'"], []]),
+ (3147483647, [["Not a valid date: 3147483647 date must be a string or a datetime.date not a 'int'"], []]),
+ ([], [["Not a valid date: [] date must be a string or a datetime.date not a 'list'"], []]),
+ ({}, [["Not a valid date: {} date must be a string or a datetime.date not a 'dict'"], []]),
+ ]
+
+ for data in data_matrix:
+ for i, format in enumerate(formats):
+ print("Validating: {0} Format: {1}".format(data[0], format))
+ c = ec()
+ c._validate_scalar_date(data[0], [format], '')
+ assert _remap_errors(c) == data[1][i]
+
+
+def test_validate_scalar_type():
+ # Test that when providing a scalar type that does not exist, it should raise an error
+ with pytest.raises(CoreError):
+ c = ec()
+ c._validate_scalar_type(True, True, '')
+
+ data_matrix = []
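+ # Each entry appended below is (value, type name, expected error strings)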
+
+ # Tests for str
+ data_matrix += [
+ ("", "str", []),
+ ("123", "str", []),
+ ("yes", "str", []),
+ ("no", "str", []),
+ (b"foobar", "str", []),
+ (u"Néron", "str", []),
+ (123, "str", ["Value '123' is not of type 'str'. Path: ''"]),
+ (None, "str", ["Value 'None' is not of type 'str'. Path: ''"]),
+ (3.14, "str", ["Value '3.14' is not of type 'str'. Path: ''"]),
+ (True, "str", ["Value 'True' is not of type 'str'. Path: ''"]),
+ ({'a': 'b'}, "str", ["Value '{'a': 'b'}' is not of type 'str'. Path: ''"]),
+ (['a', 'b'], "str", ["Value '['a', 'b']' is not of type 'str'. Path: ''"]),
+ ]
+
+ # Tests for int
+ data_matrix += [
+ (123, "int", []),
+ (3.14, "int", ["Value '3.14' is not of type 'int'. Path: ''"]),
+ ("", "int", ["Value '' is not of type 'int'. Path: ''"]),
+ ("123", "int", ["Value '123' is not of type 'int'. Path: ''"]),
+ # (b"foobar", "int", ["Value b'foobar' is not of type 'int'. Path: ''"]),
+ (u"Néron", "int", [u"Value 'Néron' is not of type 'int'. Path: ''"]),
+ (None, "int", ["Value 'None' is not of type 'int'. Path: ''"]),
+ (True, "int", ["Value 'True' is not of type 'int'. Path: ''"]),
+ ({'a': 'b'}, "int", ["Value '{'a': 'b'}' is not of type 'int'. Path: ''"]),
+ (['a', 'b'], "int", ["Value '['a', 'b']' is not of type 'int'. Path: ''"]),
+ ]
+
+ # Tests for float type
+ data_matrix += [
+ ("1e-06", 'float', []),
+ ("1z-06", 'float', ["Value '1z-06' is not of type 'float'. Path: ''"]),
+ (1.5, 'float', []),
+ ("abc", 'float', ["Value 'abc' is not of type 'float'. Path: ''"]),
+ # (b"abc", 'float', ["Value 'abc' is not of type 'float'. Path: ''"]),
+ (u"abc", 'float', ["Value 'abc' is not of type 'float'. Path: ''"]),
+ (True, 'float', ["Value 'True' is not of type 'float'. Path: ''"]),
+ ]
+
+ # Tests for bool
+ data_matrix += [
+ (True, "bool", []),
+ (False, "bool", []),
+ (1, "bool", ["Value '1' is not of type 'bool'. Path: ''"]),
+ (3.14, "bool", ["Value '3.14' is not of type 'bool'. Path: ''"]),
+ ("", "bool", ["Value '' is not of type 'bool'. Path: ''"]),
+ ("yes", "bool", ["Value 'yes' is not of type 'bool'. Path: ''"]),
+ ("no", "bool", ["Value 'no' is not of type 'bool'. Path: ''"]),
+ # (b"foobar", "bool", [b"Value 'foobar' is not of type 'bool'. Path: ''"]),
+ (u"Néron", "bool", [u"Value 'Néron' is not of type 'bool'. Path: ''"]),
+ ([], "bool", ["Value '[]' is not of type 'bool'. Path: ''"]),
+ ({}, "bool", ["Value '{}' is not of type 'bool'. Path: ''"]),
+ ]
+
+ # Tests for number
+ data_matrix += [
+ (1, "number", []),
+ (3.14, "number", []),
+ (True, "number", ["Value 'True' is not of type 'number'. Path: ''"]),
+ (False, "number", ["Value 'False' is not of type 'number'. Path: ''"]),
+ ("", "number", ["Value '' is not of type 'number'. Path: ''"]),
+ ("yes", "number", ["Value 'yes' is not of type 'number'. Path: ''"]),
+ ("no", "number", ["Value 'no' is not of type 'number'. Path: ''"]),
+ # (b"foobar", "number", [b"Value 'foobar' is not of type 'number'. Path: ''"]),
+ (u"Néron", "number", [u"Value 'Néron' is not of type 'number'. Path: ''"]),
+ ([], "number", ["Value '[]' is not of type 'number'. Path: ''"]),
+ ({}, "number", ["Value '{}' is not of type 'number'. Path: ''"]),
+ ]
+
+ # Tests for text
+ data_matrix += [
+ (1, "text", []),
+ (3.14, "text", []),
+ ("", "text", []),
+ ("yes", "text", []),
+ ("no", "text", []),
+ # (b"foobar", "text", []),
+ (u"Néron", "text", []),
+ (True, "text", ["Value 'True' is not of type 'text'. Path: ''"]),
+ (False, "text", ["Value 'False' is not of type 'text'. Path: ''"]),
+ ([], "text", ["Value '[]' is not of type 'text'. Path: ''"]),
+ ({}, "text", ["Value '{}' is not of type 'text'. Path: ''"]),
+ (datetime(2015, 10, 24, 10, 22, 18), "text", ["Value '2015-10-24 10:22:18' is not of type 'text'. Path: ''"]),
+ ]
+
+ # Tests for any
+ data_matrix += [
+ (1, "any", []),
+ (3.14, "any", []),
+ (True, "any", []),
+ (False, "any", []),
+ ("", "any", []),
+ ("yes", "any", []),
+ ("no", "any", []),
+ # (b"foobar", "any", []),
+ (u"Néron", "any", []),
+ ([], "any", []),
+ ({}, "any", []),
+ (datetime(2015, 10, 24, 10, 22, 18), "any", []),
+ ]
+
+ # Tests for enum
+ data_matrix += [
+ ("", "enum", []),
+ ("123", "enum", []),
+ ("yes", "enum", []),
+ ("no", "enum", []),
+ # (b"foobar", "enum", []),
+ (u"Néron", "enum", []),
+ (123, "enum", ["Value '123' is not of type 'enum'. Path: ''"]),
+ (None, "enum", ["Value 'None' is not of type 'enum'. Path: ''"]),
+ (3.14, "enum", ["Value '3.14' is not of type 'enum'. Path: ''"]),
+ (True, "enum", ["Value 'True' is not of type 'enum'. Path: ''"]),
+ ({'a': 'b'}, "enum", ["Value '{'a': 'b'}' is not of type 'enum'. Path: ''"]),
+ (['a', 'b'], "enum", ["Value '['a', 'b']' is not of type 'enum'. Path: ''"]),
+ ]
+
+ # Tests for none
+ data_matrix += [
+ ("", "none", ["Value '' is not of type 'none'. Path: ''"]),
+ ("123", "none", ["Value '123' is not of type 'none'. Path: ''"]),
+ ("yes", "none", ["Value 'yes' is not of type 'none'. Path: ''"]),
+ ("no", "none", ["Value 'no' is not of type 'none'. Path: ''"]),
+ ("None", "none", ["Value 'None' is not of type 'none'. Path: ''"]),
+ # (b"foobar", "none", [b"Value 'foobar' is not of type 'none'. Path: ''"]),
+ (u"Néron", "none", [u"Value 'Néron' is not of type 'none'. Path: ''"]),
+ (123, "none", ["Value '123' is not of type 'none'. Path: ''"]),
+ (None, "none", []),
+ (3.14, "none", ["Value '3.14' is not of type 'none'. Path: ''"]),
+ (True, "none", ["Value 'True' is not of type 'none'. Path: ''"]),
+ ({'a': 'b'}, "none", ["Value '{'a': 'b'}' is not of type 'none'. Path: ''"]),
+ (['a', 'b'], "none", ["Value '['a', 'b']' is not of type 'none'. Path: ''"]),
+
+ ]
+
+ # Tests for timestamp
+ data_matrix += [
+ ("", 'timestamp', []),
+ ("1234567", 'timestamp', []),
+ ("2016-01-01", 'timestamp', []),
+ ("2016-01-01 15:01", 'timestamp', []),
+ # (b"foobar", "timestamp", []),
+ (u"Néron", "timestamp", []),
+ (123, 'timestamp', []),
+ (1.5, 'timestamp', []),
+ (0, 'timestamp', []),
+ (-1, 'timestamp', []),
+ (3147483647, 'timestamp', []),
+ ([], 'timestamp', ["Value '[]' is not of type 'timestamp'. Path: ''"]),
+ (datetime.now(), 'timestamp', []),
+ (datetime.today(), 'timestamp', []),
+ ]
+
+ data_matrix += [
+ (datetime(2015, 10, 24, 10, 22, 18), 'scalar', []),
+ ("", "scalar", []),
+ ("2016-01-01", 'scalar', []),
+ ("2016-01-01 15:01", 'scalar', []),
+ ("123", 'scalar', []),
+ ("yes", 'scalar', []),
+ (u"Néron", 'scalar', []),
+ (None, 'scalar', ["Value 'None' is not of type 'scalar'. Path: ''"]),
+ (123, 'scalar', []),
+ (3.14, 'scalar', []),
+ (True, 'scalar', []),
+ ({'a': 'b'}, 'scalar', ["Value '{'a': 'b'}' is not of type 'scalar'. Path: ''"]),
+ (['a', 'b'], 'scalar', ["Value '['a', 'b']' is not of type 'scalar'. Path: ''"]),
+ ]
+
+ for data in data_matrix:
+ print(u"Testing data: '{0!s}', '{1!s}', '{2!s}'".format(*data))
+ c = ec()
+ c._validate_scalar_type(data[0], data[1], '')
+ assert _remap_errors(c) == data[2]
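+
+    def test_validate_scalar_type_via_public_api(self):
+        # Hedged sketch, not part of the original patch: shows the same scalar
+        # type check surfacing through the public Core API instead of the
+        # internal _validate_scalar_type() helper exercised above. It assumes
+        # this sits inside the module's test class like the rest of the file;
+        # imports are done locally so it does not depend on the module header.
+        import pytest
+        from pykwalify.core import Core
+        from pykwalify.errors import SchemaError
+
+        c = Core(source_data="abc", schema_data={"type": "int"})
+        with pytest.raises(SchemaError):
+            c.validate()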
diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py
new file mode 100644
index 0000000..7f02aab
--- /dev/null
+++ b/tests/test_exceptions.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+# pykwalify imports
+from pykwalify import errors
+
+
+class TestExceptions(object):
+
+ def test_base_exception(self):
+ # retcode=2 should be schemaerror
+ e = errors.PyKwalifyException(msg="foobar", retcode=2)
+ assert e.__repr__() == "PyKwalifyException(msg='foobar')"
+ assert e.retname == "schemaerror"
+
+ def test_create_sub_class_exceptions(self):
+ u_e = errors.UnknownError()
+ assert u_e.retcode == 1
+
+ s_e = errors.SchemaError()
+ assert s_e.retcode == 2
+
+ c_e = errors.CoreError()
+ assert c_e.retcode == 3
+
+ r_e = errors.RuleError()
+ assert r_e.retcode == 4
+
+ sc_e = errors.SchemaConflict()
+ assert sc_e.retcode == 5
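+
+    def test_sub_class_retnames(self):
+        # Hedged sketch, not part of the original patch: assumes the retname
+        # lookup mirrors the retcodes asserted above (retcode 2 -> "schemaerror"
+        # is already covered by test_base_exception); adjust if the retnames
+        # table in pykwalify.errors differs.
+        assert errors.SchemaError().retname == "schemaerror"
+        assert errors.RuleError().retname == "ruleerror"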
diff --git a/tests/test_helper.py b/tests/test_helper.py
new file mode 100644
index 0000000..8976b5f
--- /dev/null
+++ b/tests/test_helper.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+# python std lib
+import logging
+import logging.config
+
+log = logging.getLogger()
+
+
+# Set the root logger to be silent so code that uses the python logger will
+# not print anything unless we want it to; in that case the level should be
+# set explicitly in the individual test and reset again afterwards
+def _set_log_lv(level=1337, loggers=None):
+ """ If no level is set then level will be so high all logging is silenced
+ """
+ if loggers is None:
+ # If no additional loggers are specified then only apply to the root logger
+ log.setLevel(level)
+ for handler in log.handlers:
+ handler.level = level
+ else:
+ # If other logger instances are specified, apply to them as well as the root logger
+ if log not in loggers:
+ loggers.append(log)
+
+ for log_instance in loggers:
+ log_instance.setLevel(level)
+ for handler in log_instance.handlers:
+ handler.level = level
+
+
+# Initially silence all logging
+_set_log_lv()
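+
+
+# Hedged usage sketch, not part of the original patch: a test that needs log
+# output can raise the level for the duration of the test and silence it again
+# afterwards, e.g.
+#
+#   _set_log_lv(level=logging.DEBUG)   # enable logging for this test
+#   try:
+#       ...  # exercise code that logs
+#   finally:
+#       _set_log_lv()                  # silence everything again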
diff --git a/tests/test_rule.py b/tests/test_rule.py
new file mode 100644
index 0000000..4b2b7c9
--- /dev/null
+++ b/tests/test_rule.py
@@ -0,0 +1,399 @@
+# -*- coding: utf-8 -*-
+
+""" Unit test for pyKwalify - Rule """
+
+# python std lib
+import unittest
+
+# 3rd party imports
+import pytest
+
+# pyKwalify imports
+import pykwalify
+from pykwalify.errors import RuleError, SchemaConflict
+from pykwalify.rule import Rule
+from pykwalify.compat import unicode
+
+
+class TestRule(unittest.TestCase):
+
+ def setUp(self):
+ pykwalify.partial_schemas = {}
+
+ def test_schema(self):
+ # Test that using both a schema; and an include tag raises an error, because schema; tags should be parsed via Core()
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"schema;str": {"type": "map", "mapping": {"foo": {"type": "str"}}}, "type": "map", "mapping": {"foo": {"include": "str"}}})
+ assert str(r.value) == "<RuleError: error code 4: Schema is only allowed on top level of schema file: Path: '/'>"
+ assert r.value.error_key == 'schema.not.toplevel'
+
+ def test_unknown_key(self):
+ # Test that providing an unknown key raises exception
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "foobar": True})
+ assert str(r.value) == "<RuleError: error code 4: Unknown key: foobar found: Path: '/'>"
+ assert r.value.error_key == 'key.unknown'
+
+ def test_matching_rule(self):
+ # Test that exception is raised when an invalid matching rule is used
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "map", "matching-rule": "foobar", "mapping": {"regex;.+": {"type": "seq", "sequence": [{"type": "str"}]}}})
+ assert str(r.value) == "<RuleError: error code 4: Specified rule in key: foobar is not part of allowed rule set : ['any', 'all']: Path: '/'>"
+ assert r.value.error_key == 'matching_rule.not_allowed'
+
+ def test_allow_empty_map(self):
+ r = Rule(schema={"type": "map", "allowempty": True, "mapping": {"name": {"type": "str"}}})
+ assert r.allowempty_map is True
+
+ def test_type_value(self):
+ # TODO: This test is currently partially broken; partial schema handling itself may be broken
+ # # Test that when only having a schema; rule it should throw error
+ # with pytest.raises(RuleError) as r:
+ # Rule(schema={"schema;fooone": {"type": "map", "mapping": {"foo": {"type": "str"}}}})
+ # assert str(r.value) == "<RuleError: error code 4: Key 'type' not found in schema rule: Path: '/'>"
+ # assert r.value.error_key == 'type.missing'
+
+ # Test that a valid rule works with both "str" and "unicode" type strings
+ r = Rule(schema={"type": str("str")})
+ r = Rule(schema={"type": unicode("str")})
+
+ # Test that type key must be string otherwise exception is raised
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": 1})
+ assert str(r.value) == "<RuleError: error code 4: Key 'type' in schema rule is not a string type (found int): Path: '/'>"
+ assert r.value.error_key == 'type.not_string'
+
+ # The same check should hold when an explicit parent is passed
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": 1}, parent=None)
+ assert str(r.value) == "<RuleError: error code 4: Key 'type' in schema rule is not a string type (found int): Path: '/'>"
+ assert r.value.error_key == 'type.not_string'
+
+ def test_name_value(self):
+ with pytest.raises(RuleError) as r:
+ Rule(schema={'type': 'str', 'name': {}})
+ assert str(r.value) == "<RuleError: error code 4: Value: {} for keyword name must be a string: Path: '/'>"
+
+ def test_nullable_value(self):
+ # Test that nullable value must be bool otherwise exception is raised
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "nullable": "foobar"})
+ assert str(r.value) == "<RuleError: error code 4: Value: 'foobar' for nullable keyword must be a boolean: Path: '/'>"
+ assert r.value.error_key == 'nullable.not_bool'
+
+ def test_desc_value(self):
+ with pytest.raises(RuleError) as r:
+ Rule(schema={'type': 'str', 'desc': []})
+ assert str(r.value) == "<RuleError: error code 4: Value: [] for keyword desc must be a string: Path: '/'>"
+
+ def test_example_value(self):
+ with pytest.raises(RuleError) as r:
+ Rule(schema={'type': 'str', 'example': []})
+ assert str(r.value) == "<RuleError: error code 4: Value: [] for keyword example must be a string: Path: '/'>"
+
+ def test_required_value(self):
+ # Test that required value must be bool otherwise exception is raised
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "required": "foobar"})
+ assert str(r.value) == "<RuleError: error code 4: Value: 'foobar' for required keyword must be a boolean: Path: '/'>"
+ assert r.value.error_key == 'required.not_bool'
+
+ def test_pattern_value(self):
+ # this tests an invalid regexp pattern
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "pattern": "/@/\\"})
+ assert str(r.value) == "<RuleError: error code 4: Syntax error when compiling regex pattern: None: Path: '/'>"
+ assert r.value.error_key == 'pattern.syntax_error'
+
+ # Test that pattern keyword is not allowed when using a map
+ # with self.assertRaisesRegexp(RuleError, ".+map\.pattern.+"):
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "map", "pattern": "^[a-z]+$", "allowempty": True, "mapping": {"name": {"type": "str"}}})
+ assert str(r.value) == "<RuleError: error code 4: Keyword pattern is not allowed inside map: Path: '/'>"
+ assert r.value.error_key == 'pattern.not_allowed_in_map'
+
+ # Test that pattern value must be string otherwise exception is raised
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "pattern": 1})
+ assert str(r.value) == "<RuleError: error code 4: Value of pattern keyword: '1' is not a string: Path: '/'>"
+ assert r.value.error_key == 'pattern.not_string'
+
+ def test_date_and_format_value(self):
+ r = Rule(schema={"type": "date", "format": "%y"})
+ assert r.format is not None, "date var not set proper"
+ assert isinstance(r.format, list), "date format should be a list"
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "date", "format": 1})
+ assert str(r.value) == "<RuleError: error code 4: Value of format keyword: '1' must be a string or list or string values: Path: '/'>"
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "map", "format": "%y"})
+ assert str(r.value) == "<RuleError: error code 4: Keyword format is only allowed when used with the following types: ('date',): Path: '/'>"
+
+ def test_enum_value(self):
+ # this tests the various valid enum types
+ Rule(schema={"type": "int", "enum": [1, 2, 3]})
+ Rule(schema={"type": "bool", "enum": [True, False]})
+ r = Rule(schema={"type": "str", "enum": ["a", "b", "c"]})
+ assert r.enum is not None, "enum var is not set properly"
+ assert isinstance(r.enum, list), "enum is not set to a list"
+ assert len(r.enum) == 3, "invalid length of enum entries"
+
+ # this tests the mismatch between the type and the data inside an enum
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "enum": [1, 2, 3]})
+ assert str(r.value).startswith("<RuleError: error code 4: Item: '1' in enum is not of correct class type:")
+ assert r.value.error_key == 'enum.type.unmatch'
+
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "enum": True})
+ assert str(r.value) == "<RuleError: error code 4: Enum is not a sequence: Path: '/'>"
+ assert r.value.error_key == 'enum.not_seq'
+
+ def test_assert_value(self):
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "seq", "sequence": [{"type": "str", "assert": 1}]})
+ assert str(r.value) == "<RuleError: error code 4: Value: '1' for keyword 'assert' is not a string: Path: '/sequence/0'>"
+ assert r.value.error_key == 'assert.not_str'
+
+ # Test that disallowed content in the assert expression is rejected
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "seq", "sequence": [{"type": "str", "assert": "__import__"}]})
+ assert str(r.value) == "<RuleError: error code 4: Value: '__import__' contain invalid content that is not allowed to be present in assertion keyword: Path: '/sequence/0'>" # NOQA: E501
+ assert r.value.error_key == 'assert.unsupported_content'
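+
+        # Hedged sketch, not part of the original patch: a simple expression that
+        # only references 'val' is assumed to pass the content filter at rule
+        # parse time (the expression itself is evaluated later, during validation).
+        Rule(schema={"type": "seq", "sequence": [{"type": "int", "assert": "val > 0"}]})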
+
+ def test_length(self):
+ r = Rule(schema={"type": "int", "length": {"max": 10, "min": 1}})
+ assert r.length is not None, "length var not set properly"
+ assert isinstance(r.length, dict), "length var is not of dict type"
+
+ # this tests that the length key must be a dict
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "int", "length": []})
+ assert str(r.value) == "<RuleError: error code 4: Length value is not a dict type: '[]': Path: '/'>"
+ assert r.value.error_key == 'length.not_map'
+
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "length": {"max": "z"}})
+ assert str(r.value) == "<RuleError: error code 4: Value: 'z' for 'max' keyword is not a number: Path: '/'>"
+ assert r.value.error_key == 'length.max.not_number'
+
+ # this tests that min bigger than max is not possible
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "int", "length": {"max": 10, "min": 11}})
+ assert str(r.value) == "<RuleError: error code 4: Value for 'max' can't be less then value for 'min'. 10 < 11: Path: '/'>"
+ assert r.value.error_key == 'length.max_lt_min'
+
+ # test that min-ex bigger than max-ex is not possible
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "int", "length": {"max-ex": 10, "min-ex": 11}})
+ assert str(r.value) == "<RuleError: error code 4: Value for 'max-ex' can't be less then value for 'min-ex'. 10 <= 11: Path: '/'>"
+ assert r.value.error_key == 'length.max-ex_le_min-ex'
+
+ # test that a string has non-negative boundaries
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "length": {"max": -1, "min": -2}})
+ assert str(r.value) == "<RuleError: error code 4: Value for 'min' can't be negative in case of type str.: Path: '/'>"
+ assert r.value.error_key == 'length.min_negative'
+
+ # test that a seq has non-negative boundaries
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "seq", "length": {"max": 3, "min": -2}})
+ assert str(r.value) == "<RuleError: error code 4: Value for 'min' can't be negative in case of type seq.: Path: '/'>"
+ assert r.value.error_key == 'length.min_negative'
+
+ def test_range_value(self):
+ r = Rule(schema={"type": "int", "range": {"max": 10, "min": 1}})
+ assert r.range is not None, "range var not set properly"
+ assert isinstance(r.range, dict), "range var is not of dict type"
+
+ # this tests that the range key must be a dict
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "int", "range": []})
+ assert str(r.value) == "<RuleError: error code 4: Range value is not a dict type: '[]': Path: '/'>"
+ assert r.value.error_key == 'range.not_map'
+
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "range": {"max": "z"}})
+ assert str(r.value) == "<RuleError: error code 4: Value: 'z' for 'max' keyword is not a number: Path: '/'>"
+ assert r.value.error_key == 'range.max.not_number'
+
+ # this tests that min bigger than max is not possible
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "int", "range": {"max": 10, "min": 11}})
+ assert str(r.value) == "<RuleError: error code 4: Value for 'max' can't be less then value for 'min'. 10 < 11: Path: '/'>"
+ assert r.value.error_key == 'range.max_lt_min'
+
+ # test that min-ex bigger than max-ex is not possible
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "int", "range": {"max-ex": 10, "min-ex": 11}})
+ assert str(r.value) == "<RuleError: error code 4: Value for 'max-ex' can't be less then value for 'min-ex'. 10 <= 11: Path: '/'>"
+ assert r.value.error_key == 'range.max-ex_le_min-ex'
+
+ # test that a string has non-negative boundaries
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "range": {"max": -1, "min": -2}})
+ assert str(r.value) == "<RuleError: error code 4: Value for 'min' can't be negative in case of type str.: Path: '/'>"
+ assert r.value.error_key == 'range.min_negative'
+
+ # test that a seq has non-negative boundaries
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "seq", "range": {"max": 3, "min": -2}})
+ assert str(r.value) == "<RuleError: error code 4: Value for 'min' can't be negative in case of type seq.: Path: '/'>"
+ assert r.value.error_key == 'range.min_negative'
+
+ def test_ident_value(self):
+ pass
+
+ def test_unique_value(self):
+ # this tests that 'unique' cannot be used at the root level of the schema
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "str", "unique": True})
+ assert str(r.value) == "<RuleError: error code 4: Keyword 'unique' can't be on root level of schema: Path: '/'>"
+ assert r.value.error_key == 'unique.not_on_root_level'
+
+ # this tests that 'unique' cannot be used on a non-scalar type like seq
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "seq", "unique": True})
+ assert str(r.value) == "<RuleError: error code 4: Type of the value: 'seq' for 'unique' keyword is not a scalar type: Path: '/'>"
+ assert r.value.error_key == 'unique.not_scalar'
+
+ def test_sequence(self):
+ # this tests seq type with an internal type of str
+ r = Rule(schema={"type": "seq", "sequence": [{"type": "str"}]})
+ assert r.type is not None, "rule does not contain type var"
+ assert r.type == "seq", "type is not 'seq'"
+ assert r.sequence is not None, "rule does not contain sequence var"
+ assert isinstance(r.sequence, list), "sequence is not a list"
+
+ # Test basic sequence rule
+ r = Rule(schema={"type": "seq", "sequence": [{"type": "str"}]})
+ assert r.type == "seq"
+ assert isinstance(r.sequence, list)
+ assert isinstance(r.sequence[0], Rule)
+ assert r.sequence[0].type == "str"
+
+ # Test sequence without explicit type
+ r = Rule(schema={"sequence": [{"type": "str"}]})
+ assert r.type == "seq"
+ assert isinstance(r.sequence, list)
+ assert isinstance(r.sequence[0], Rule)
+ assert r.sequence[0].type == "str"
+
+ # Test short name 'seq'
+ r = Rule(schema={"seq": [{"type": "str"}]})
+ assert r.type == "seq"
+ assert isinstance(r.sequence, list)
+ assert isinstance(r.sequence[0], Rule)
+ assert r.sequence[0].type == "str"
+
+ # Test error is raised when sequence key is missing
+ with pytest.raises(SchemaConflict) as ex:
+ Rule(schema={"type": "seq"})
+ assert str(ex.value) == "<SchemaConflict: error code 5: Type is sequence but no sequence alias found on same level: Path: '/'>"
+
+ # sequence and pattern can't be used at the same time
+ with pytest.raises(SchemaConflict) as ex:
+ Rule(schema={"type": "seq", "sequence": [{"type": "str"}], "pattern": "..."})
+ assert str(ex.value) == "<SchemaConflict: error code 5: Sequence and pattern can't be on the same level in the schema: Path: '/'>"
+
+ def test_build_sequence_multiple_values(self):
+ """
+ Test with multiple values.
+ """
+ # Test basic sequence rule
+ r = Rule(schema={'type': 'seq', 'sequence': [{'type': 'str'}, {'type': 'int'}]})
+ assert r.type == "seq"
+ assert r.matching == "any"
+ assert len(r.sequence) == 2
+ assert isinstance(r.sequence, list)
+ assert all(isinstance(r.sequence[i], Rule) for i in range(len(r.sequence)))
+ assert r.sequence[0].type == "str"
+ assert r.sequence[1].type == "int"
+
+ # Test sequence without explicit type
+ r = Rule(schema={'sequence': [{'type': 'str'}, {'type': 'int'}]})
+ assert r.type == "seq"
+ assert r.matching == "any"
+ assert len(r.sequence) == 2
+ assert isinstance(r.sequence, list)
+ assert all(isinstance(r.sequence[i], Rule) for i in range(len(r.sequence)))
+ assert r.sequence[0].type == "str"
+ assert r.sequence[1].type == "int"
+
+ # Test the explicit 'matching' keyword; a hedged sketch follows
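+        # Hedged sketch, not part of the original patch: assumes 'matching' is the
+        # schema keyword behind the r.matching attribute asserted above and that
+        # 'all' is an accepted value that is simply stored on the rule object.
+        r = Rule(schema={'type': 'seq', 'matching': 'all', 'sequence': [{'type': 'str'}, {'type': 'int'}]})
+        assert r.matching == "all"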
+
+ def test_mapping(self):
+ # This tests mapping with a nested type and pattern
+ r = Rule(schema={"type": "map", "mapping": {"name": {"type": "str", "pattern": ".+@.+"}}})
+ assert r.type == "map", "rule type is not map"
+ assert isinstance(r.mapping, dict), "mapping is not dict"
+ assert r.mapping["name"].type == "str", "nested mapping is not of string type"
+ assert r.mapping["name"].pattern is not None, "nested mapping has no pattern var set"
+ assert r.mapping["name"].pattern == ".+@.+", "pattern is not set to correct value"
+
+ # when type 'map' is specified, a 'mapping' key must be present on the same level
+ with pytest.raises(SchemaConflict) as ex:
+ Rule(schema={"type": "map"})
+ assert str(ex.value) == "<SchemaConflict: error code 5: Type is mapping but no mapping alias found on same level: Path: '/'>"
+
+ # 'map' and 'enum' can't be used at the same time
+ # TODO: This does not work because it currently raises RuleError: <RuleError: error code 4: enum.notscalar>
+ # with pytest.raises(SchemaConflict):
+ # r = Rule(schema={"type": "map", "enum": [1, 2, 3]})
+
+ # Test that 'map' and 'mapping' can't be at the same level
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"map": {"stream": {"type": "any"}}, "mapping": {"seams": {"type": "any"}}})
+ assert str(r.value) == "<RuleError: error code 4: Keywords 'map' and 'mapping' can't be used on the same level: Path: '/'>"
+ assert r.value.error_key == 'mapping.duplicate_keywords'
+
+ # This tests that an invalid regex raises an error when parsing rules
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "map", "matching-rule": "any", "mapping": {"regex;(+": {"type": "seq", "sequence": [{"type": "str"}]}}})
+ assert str(r.value) == "<RuleError: error code 4: Unable to compile regex '(+': Path: '/'>"
+ assert r.value.error_key == 'mapping.regex.compile_error'
+
+ # this tests a map/dict with no elements
+ with pytest.raises(RuleError) as r:
+ Rule(schema={"type": "map", "mapping": {}})
+ assert str(r.value) == "<RuleError: error code 4: Mapping do not contain any elements: Path: '/'>"
+ assert r.value.error_key == 'mapping.no_elements'
+
+ def test_default_value(self):
+ pass
+
+ def test_check_conflicts(self):
+ # TODO: This does not work and the enum schema conflict is not raised... RuleError: <RuleError: error code 4: enum.notscalar>
+ # with pytest.raises(SchemaConflict) as ex:
+ # r = Rule(schema={"type": "seq", "sequence": [{"type": "str"}], "enum": [1, 2, 3]})
+ # assert ex.value.msg.startswith("seq.conflict :: enum"), "Wrong exception was raised"
+
+ # Test sequence and mapping can't be used at the same level
+ with pytest.raises(SchemaConflict) as ex:
+ Rule(schema={"type": "seq", "sequence": [{"type": "str"}], "mapping": {"name": {"type": "str", "pattern": ".+@.+"}}})
+ assert str(ex.value) == "<SchemaConflict: error code 5: Sequence and mapping can't be on the same level in the schema: Path: '/'>"
+ assert ex.value.error_key == 'seq.conflict.mapping'
+
+ # Mapping and sequence can't be used at the same time
+ with pytest.raises(SchemaConflict) as ex:
+ Rule(schema={"type": "map", "mapping": {"foo": {"type": "str"}}, "sequence": [{"type": "str"}]})
+ assert str(ex.value) == "<SchemaConflict: error code 5: Mapping and sequence can't be on the same level in the schema: Path: '/'>"
+ assert ex.value.error_key == 'map.conflict.sequence'
+
+ # scalar type and sequence can't be used at the same time
+ with pytest.raises(SchemaConflict) as ex:
+ Rule(schema={"type": "int", "sequence": [{"type": "str"}]})
+ assert str(ex.value) == "<SchemaConflict: error code 5: Scalar and sequence can't be on the same level in the schema: Path: '/'>"
+ assert ex.value.error_key == 'scalar.conflict.sequence'
+
+ # scalar type and mapping can't be used at the same time
+ with pytest.raises(SchemaConflict) as ex:
+ Rule(schema={"type": "int", "mapping": {"foo": {"type": "str"}}})
+ assert str(ex.value) == "<SchemaConflict: error code 5: Scalar and mapping can't be on the same level in the schema: Path: '/'>"
+ assert ex.value.error_key == 'scalar.conflict.mapping'
+
+ # enum and range can't be used at the same time
+ with pytest.raises(SchemaConflict) as ex:
+ Rule(schema={"type": "int", "enum": [1, 2, 3], "range": {"max": 10, "min": 1}})
+ assert str(ex.value) == "<SchemaConflict: error code 5: Enum and range can't be on the same level in the schema: Path: '/'>"
+ assert ex.value.error_key == 'enum.conflict.range'
diff --git a/tests/test_types.py b/tests/test_types.py
new file mode 100644
index 0000000..de7db87
--- /dev/null
+++ b/tests/test_types.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+""" Unit test for pyKwalify - Rule """
+
+# python std lib
+import unittest
+
+# pykwalify imports
+from pykwalify import types
+
+
+class TestTypes(unittest.TestCase):
+
+ def test_types(self):
+ """
+ Test that all type helper methods work correctly
+ """
+ assert types.type_class("str") == str
+
+ assert types.is_builtin_type("str")
+
+ assert types.is_collection_type("map")
+ assert types.is_collection_type("seq")
+ assert not types.is_collection_type("str")
+
+ assert types.is_scalar_type("str")
+ assert not types.is_scalar_type("seq")
+ assert not types.is_scalar_type("map")
+
+ assert types.is_collection([])
+ assert types.is_collection({})
+ assert not types.is_collection("foo")
+
+ assert types.is_scalar("")
+ assert types.is_scalar(True)
+ assert not types.is_scalar([])
+
+ assert types.is_correct_type("", str)
+ assert types.is_correct_type({}, dict)
+
+ assert types.is_string("foo")
+ assert not types.is_string([])
+
+ assert types.is_int(1)
+ assert not types.is_int("foo")
+
+ assert types.is_bool(True)
+ assert not types.is_bool(1)
+ assert not types.is_bool("true")
+
+ assert types.is_float(1.0)
+ assert not types.is_float("foo")
+
+ assert types.is_number(1)
+ assert types.is_number(1.0)
+ assert not types.is_number("foo")
+
+ assert types.is_text("foo")
+ assert types.is_text(1)
+ assert types.is_text(1.0)
+ assert not types.is_text([])
+ assert not types.is_text(True)
+
+ assert types.is_any("foo")
+ assert types.is_any(True)
+ assert types.is_any(1)
+ assert types.is_any(1.0)
+ assert types.is_any({})
+ assert types.is_any([])
+
+ assert types.is_enum("foo")
+ assert not types.is_enum(1)
+
+ assert types.is_none(None)
+ assert not types.is_none("foo")
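+
+    def test_type_class_for_collections(self):
+        # Hedged sketch, not part of the original patch: assumes the type table
+        # behind type_class() maps the collection type names onto the python
+        # classes used by is_collection(), mirroring type_class("str") == str
+        # asserted above.
+        assert types.type_class("map") == dict
+        assert types.type_class("seq") == list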
diff --git a/tests/test_unicode.py b/tests/test_unicode.py
new file mode 100644
index 0000000..36f5549
--- /dev/null
+++ b/tests/test_unicode.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+
+""" Unit test for pyKwalify - Core """
+
+# python std lib
+import os
+
+# pykwalify imports
+import pykwalify
+from pykwalify.compat import unicode
+from pykwalify.core import Core
+from pykwalify.errors import SchemaError
+
+# 3rd party imports
+from pykwalify.compat import yaml
+from testfixtures import compare
+
+
+class TestUnicode(object):
+
+ def setUp(self):
+ pykwalify.partial_schemas = {}
+
+ def f(self, *args):
+ if os.path.isabs(args[0]):
+ return args[0]
+
+ return os.path.join(os.path.dirname(os.path.realpath(__file__)), "files", "unicode", *args)
+
+ def test_files_with_unicode_content_success(self, tmpdir):
+ """
+ These tests should pass with no exception raised
+ """
+ fail_data_2s_yaml = {
+ 'schema': {
+ 'type': 'map',
+ 'mapping': {
+ 'msg': {
+ 'type': 'int',
+ },
+ }
+ },
+ 'data': {
+ 'msg': 123,
+ },
+ 'errors': []
+ }
+
+ source_f = tmpdir.join(u"2så.json")
+ source_f.write(yaml.safe_dump(fail_data_2s_yaml, allow_unicode=True))
+
+ _pass_tests = [
+ # Test mapping with unicode key and value
+ u"1s.yaml",
+ # Test unicode filename.
+ # It is not possible to package a file with unicode characters
+ # like åäö in the filename in some python versions.
+ # A file with åäö in its name is mocked during testing to simulate this case.
+ unicode(source_f),
+ # Test sequence with unicode keys
+ u"3s.yaml",
+ ]
+
+ for passing_test_files in _pass_tests:
+ f = self.f(passing_test_files)
+
+ with open(f, "r") as stream:
+ yaml_data = yaml.safe_load(stream)
+ data = yaml_data["data"]
+ schema = yaml_data["schema"]
+
+ try:
+ print(u"Running test files: {0}".format(f))
+ c = Core(source_data=data, schema_data=schema)
+ c.validate()
+ compare(c.validation_errors, [], prefix="No validation errors should exist...")
+ except Exception as e:
+ print(u"ERROR RUNNING FILES: {0}".format(f))
+ raise e
+
+ # This serves as an extra schema validation that tests more complex structures than test_rule.py does
+ compare(c.root_rule.schema_str, schema, prefix=u"Parsed rules are not correct, something has changed... files : {0}".format(f))
+
+ def test_files_with_unicode_content_failing(self, tmpdir):
+ """
+ These tests should fail with the specified exception
+ """
+ # To trigger schema exception we must pass in a source file
+ fail_data_2f_yaml = {
+ 'schema': {
+ 'type': 'map',
+ 'mapping': {
+ 'msg': {
+ 'type': 'int',
+ },
+ }
+ },
+ 'data': {
+ 'msg': 'Foobar',
+ },
+ 'errors': ["Value 'Foobar' is not of type 'int'. Path: '/msg'"]
+ }
+
+ source_f = tmpdir.join(u"2få.json")
+ source_f.write(yaml.safe_dump(fail_data_2f_yaml, allow_unicode=True))
+
+ _fail_tests = [
+ # Test mapping with unicode key and value but wrong type
+ (u"1f.yaml", SchemaError),
+ # Test unicode filename with validation errors.
+ # It is not possible to package a file with unicode characters
+ # like åäö in the filename in some python versions.
+ # A file with åäö in its name is mocked during testing to simulate this case.
+ (unicode(source_f), SchemaError),
+ # Test unicode data inside seq but wrong type
+ (u"3f.yaml", SchemaError),
+ ]
+
+ for failing_test, exception_type in _fail_tests:
+ f = self.f(failing_test)
+
+ with open(f, "r") as stream:
+ yaml_data = yaml.safe_load(stream)
+ data = yaml_data["data"]
+ schema = yaml_data["schema"]
+ errors = yaml_data["errors"]
+
+ try:
+ print(u"Running test files: {0}".format(f))
+ c = Core(source_data=data, schema_data=schema)
+ c.validate()
+ except exception_type:
+ pass # OK
+ else:
+ raise AssertionError(u"Exception {0} not raised as expected... FILES: {1} : {2}".format(exception_type, exception_type))
+
+ compare(sorted(c.validation_errors), sorted(errors), prefix=u"Wrong validation errors when parsing files : {0}".format(f))
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..3710562
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,16 @@
+# Tox (http://tox.testrun.org/) is a tool for running tests in
+# multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip
+# install tox" and then run "tox" from this directory.
+
+[tox]
+envlist = py27, py33, py34, py35, flake8
+
+[testenv]
+sitepackages = False
+deps = -r{toxinidir}/dev-requirements.txt
+commands = python {envbindir}/coverage run --source pykwalify -p -m py.test -v
+
+[testenv:flake8]
+deps = flake8
+commands = flake8 --max-line-length=160 --show-source --statistics --exclude=.venv,.tox,dist,docs,build,.git
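+
+# Hedged usage note, not part of the original patch: a single environment can be
+# run with e.g. "tox -e flake8" or "tox -e py34"; "tox -r" recreates the
+# virtualenvs after dependency changes.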