From 9299add24002220080e464969a37ccde73ff34b3 Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Mon, 5 Feb 2024 17:58:34 +0100 Subject: [PATCH 01/26] MNT: Update enum parsing This aligns xncml enum parsing behavior with xarray's netCDF4 backend behavior. --- CHANGELOG.md | 2 +- tests/test_parser.py | 4 ++-- xncml/parser.py | 17 +++++++---------- 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 68f0d9c..e497ca5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ **Breaking changes** - Nested group handling: Before this version, all groups were read, but conflicting variable names in-between groups would shadow data. Now, similarly to xarray ``open_dataset``, ``open_ncml`` accepts an optional ``group`` argument to specify which group should be read. When ``group`` is not specified, it defaults to the root group. Additionally ``group`` can be set to ``'*'`` so that every group is read and the hierarchy is flattened. In the event of conflicting variable/dimension names across groups, the conflicting name will be modified by appending ``'__n'`` where n is incremented. - +- Enums are no longer transformed into CF flag_values and flag_meanings attributes, instead they are stored in the ``encoding["dtype"].metadata`` of their respective variable. This is aligned with what is done on xarray. 0.4.0 (2024-01-08) ================== diff --git a/tests/test_parser.py b/tests/test_parser.py index 9676daa..fea0cce 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -333,8 +333,8 @@ def test_multiple_values_for_scalar(): def test_read_enum(): """An enum should be stored in the dtype metadata of its variable.""" ds = xncml.open_ncml(data / 'testEnums.xml') - assert ds['be_or_not_to_be'].attrs['flag_values'] == [0, 1] - assert ds['be_or_not_to_be'].attrs['flag_meanings'] == ['false', 'true'] + assert ds.be_or_not_to_be.dtype.metadata['enum'] == {'false': 0, 'true': 1} + assert ds.be_or_not_to_be.dtype.metadata['enum_name'] == 'boolean' def test_empty_attr(): diff --git a/xncml/parser.py b/xncml/parser.py index a71f249..274b916 100644 --- a/xncml/parser.py +++ b/xncml/parser.py @@ -459,12 +459,9 @@ def read_enum(obj: EnumTypedef) -> dict[str, list]: Returns ------- dict: - A dictionary with CF flag_values and flag_meanings that describe the Enum. + A dictionary mapping enum member names to their integer values.
""" - return { - 'flag_values': list(map(lambda e: e.key, obj.content)), - 'flag_meanings': list(map(lambda e: e.content[0], obj.content)), - } + return {e.content[0]: e.key for e in obj.content} def read_variable( @@ -472,7 +469,7 @@ def read_variable( ref: xr.Dataset, obj: Variable, dimensions: dict, - enums: dict, + enums: dict[str, dict[str, int]], group_path: str, ) -> xr.Dataset: """ @@ -576,10 +573,10 @@ def read_variable( raise NotImplementedError if obj.typedef in enums.keys(): - # TODO (@bzah): Update this once Enums are merged in xarray - # https://github.com/pydata/xarray/pull/8147 - out.attrs['flag_values'] = enums[obj.typedef]['flag_values'] - out.attrs['flag_meanings'] = enums[obj.typedef]['flag_meanings'] + dtype = out.dtype + new_dtype = np.dtype(dtype, metadata={'enum': enums[obj.typedef], 'enum_name': obj.typedef}) + out.encoding['dtype'] = new_dtype + out = out.astype(new_dtype) elif obj.typedef is not None: raise NotImplementedError import re From 2734e0bb4526173321f77d34484675ca94a50794 Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Wed, 7 Feb 2024 09:31:35 +0100 Subject: [PATCH 02/26] DOC: add version of xarray --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e497ca5..3ba985f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ **Breaking changes** - Nested group handling: Before this version, all groups were read, but conflicting variable names in-between groups would shadow data. Now, similarly to xarray ``open_dataset``, ``open_ncml`` accepts an optional ``group`` argument to specify which group should be read. When ``group`` is not specified, it defaults to the root group. Additionally ``group`` can be set to ``'*'`` so that every group is read and the hierarchy is flattened. In the event of conflicting variable/dimension names across groups, the conflicting name will be modified by appending ``'__n'`` where n is incremented. -- Enums are no longer transformed into CF flag_values and flag_meanings attributes, instead they are stored in the ``encoding["dtype"].metadata`` of their respective variable. This is aligned with what is done on xarray. +- Enums are no longer transformed into CF flag_values and flag_meanings attributes, instead they are stored in the ``encoding["dtype"].metadata`` of their respective variable. 
This is aligned with what is done on xarray v2024.01.0 0.4.0 (2024-01-08) ================== From f255474b5b2735d0e77ddda52bd5c0255ad594f4 Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Wed, 7 Feb 2024 10:38:37 +0100 Subject: [PATCH 03/26] ENH: Migrate to pyproject.toml --- .pre-commit-config.yaml | 1 + pyproject.toml | 58 +++++++++++++++++++++++++++++++++++++++++ requirements.txt | 8 ------ setup.cfg | 2 +- setup.py | 42 ----------------------------- 5 files changed, 60 insertions(+), 51 deletions(-) create mode 100644 pyproject.toml delete mode 100644 requirements.txt delete mode 100644 setup.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3842d1b..c3fbf2f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,6 +11,7 @@ repos: - id: check-docstring-first - id: check-json - id: check-yaml + - id: check-toml - id: double-quote-string-fixer - repo: https://github.com/psf/black-pre-commit-mirror diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..6e4a7a3 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,58 @@ +[build-system] +requires = [ "setuptools>=60", "setuptools-scm>=8.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "xncml" +authors = [ + {name = "Anderson Banihirwe", email = "abanihi@ucar.edu"} +] +maintainers = [ + {name = "Anderson Banihirwe", email = "abanihi@ucar.edu"}, + {name = "David Huard"}, +] +description = "Tools for manipulating and opening NCML (NetCDF Markup) files with/for xarray" +readme = {file = "README.md", content-type = "text/markdown"} +requires-python = ">=3.9.0" +keywords = ["xncml", "xarray", "netcdf", "ncml" ] +license = {file = "LICENSE"} +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Natural Language :: English", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Topic :: Scientific/Engineering :: Atmospheric Science" +] +dynamic = ["version"] +dependencies = [ + "xmltodict", + "xsdata", + "xarray", + "cftime", + "netCDF4", + "dask", + "psutil", + "setuptools", # explicitly required for python 3.12 +] + +[project.optional-dependencies] +dev = [ + "pytest", + "flake8", +] + +[project.urls] +Homepage="https://github.com/xarray-contrib/xncml" +Issues="https://github.com/xarray-contrib/xncml/issues" +Changelog="https://github.com/xarray-contrib/xncml/blob/master/CHANGELOG.md" + +[tool.setuptools] +packages = ["xncml"] + +[tool.setuptools_scm] +# empty but needed to ensure setuptools_scm is used diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 0086aae..0000000 --- a/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -xmltodict -xsdata -xarray -cftime -netCDF4 -dask -psutil -setuptools # explicitly required for python 3.12 diff --git a/setup.cfg b/setup.cfg index dcd1c04..d9bcf3a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -11,7 +11,7 @@ select = B,C,E,F,W,T4,B9 [isort] known_first_party=xncml -known_third_party=numpy,pkg_resources,psutil,pytest,setuptools,xarray,xmltodict,xsdata +known_third_party=numpy,pkg_resources,psutil,pytest,xarray,xmltodict,xsdata multi_line_output=3 include_trailing_comma=True force_grid_wrap=0 diff --git a/setup.py b/setup.py deleted file mode 100644 index 31bf94b..0000000 --- a/setup.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python - -"""The setup script.""" 
- -import os - from setuptools import find_packages, setup - -if os.path.exists('requirements.txt'): - with open('requirements.txt') as f: - install_requires = f.read().strip().split('\n') -else: - install_requires = ['xmltodict', 'xsdata', 'xarray'] - -if os.path.exists('README.md'): - with open('README.md') as f: - long_description = f.read() -else: - long_description = '' - - -setup( - name='xncml', - description='Tools for manipulating and opening NCML (NetCDF Markup) files with/for xarray', - long_description=long_description, - maintainer='Anderson Banihirwe', - maintainer_email='abanihi@ucar.edu', - url='https://github.com/xarray-contrib/xncml', - packages=find_packages(), - package_dir={'xncml': 'xncml'}, - include_package_data=True, - install_requires=install_requires, - license='Apache 2.0', - zip_safe=False, - keywords='xncml, xarray, netcdf', - use_scm_version=True, - python_requires='>=3.9, <4', - setup_requires=['setuptools_scm', 'setuptools>=30.3.0', 'setuptools_scm_git_archive'], - extras_require={ - 'dev': ['pytest', 'flake8'], - }, -) From 60e0de012043c5530519c450140cfbb39e43c013 Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Wed, 14 Feb 2024 09:10:26 +0100 Subject: [PATCH 04/26] maint: Make use of Ouranos' cookiecutter - migrate builder from setuptools to flit - migrate doc structure - move sources to /src/xncml/ dir (was in /xncml) - migrate Markdown to rST - update cookiecuttered files with relevant xncml info --- .coveragerc | 5 - .cruft.json | 29 ++ .editorconfig | 24 + .flake8 | 30 ++ .git_archival.txt | 1 - .pre-commit-config.yaml | 54 ++- .readthedocs.yml | 2 +- .yamllint.yaml | 8 + AUTHORS.rst | 17 + CHANGELOG.md => CHANGELOG.rst | 22 +- CONTRIBUTING.rst | 258 ++++++++++ Makefile | 115 +++-- README.md | 32 -- README.rst | 82 ++++ ci/doc.yml | 13 +- ci/environment-dev-3.7.yml | 16 +- docs/Makefile | 179 +------ docs/authors.rst | 1 + docs/changelog.rst | 1 + docs/conf.py | 191 ++++++++ docs/contributing.rst | 1 + docs/index.rst | 37 ++ docs/make.bat | 278 ++--------- docs/readme.rst | 1 + docs/source/changelog.md | 6 - docs/source/conf.py | 263 ----------- docs/source/index.md | 37 -- docs/{source => }/tutorial.ipynb | 2 +- pyproject.toml | 205 +++++++- setup.cfg | 22 - src/xncml/__init__.py | 7 + {xncml => src/xncml}/core.py | 297 ++++++------ {xncml => src/xncml}/generated/__init__.py | 34 +- {xncml => src/xncml}/generated/ncml_2_2.py | 518 ++++++++++----------- {xncml => src/xncml}/parser.py | 122 ++--- tests/test_core.py | 250 +++++----- tests/test_parser.py | 313 ++++++------- tox.ini | 52 +++ xncml/__init__.py | 14 - 39 files changed, 1865 insertions(+), 1674 deletions(-) delete mode 100644 .coveragerc create mode 100644 .cruft.json create mode 100644 .editorconfig create mode 100644 .flake8 delete mode 100644 .git_archival.txt create mode 100644 .yamllint.yaml create mode 100644 AUTHORS.rst rename CHANGELOG.md => CHANGELOG.rst (65%) create mode 100644 CONTRIBUTING.rst delete mode 100644 README.md create mode 100644 README.rst create mode 100644 docs/authors.rst create mode 100644 docs/changelog.rst create mode 100644 docs/conf.py create mode 100644 docs/contributing.rst create mode 100644 docs/index.rst create mode 100644 docs/readme.rst delete mode 100644 docs/source/changelog.md delete mode 100644 docs/source/conf.py delete mode 100644 docs/source/index.md rename docs/{source => }/tutorial.ipynb (99%) delete mode 100644 setup.cfg create mode 100644 src/xncml/__init__.py rename {xncml => src/xncml}/core.py (61%) rename {xncml => 
src/xncml}/generated/__init__.py (50%) rename {xncml => src/xncml}/generated/ncml_2_2.py (53%) rename {xncml => src/xncml}/parser.py (86%) create mode 100644 tox.ini delete mode 100644 xncml/__init__.py diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 7988693..0000000 --- a/.coveragerc +++ /dev/null @@ -1,5 +0,0 @@ -[run] -omit = - tests/*.py - setup.py - xncml/__init__.py diff --git a/.cruft.json b/.cruft.json new file mode 100644 index 0000000..ffd0733 --- /dev/null +++ b/.cruft.json @@ -0,0 +1,29 @@ +{ + "template": "https://github.com/Ouranosinc/cookiecutter-pypackage.git", + "commit": "f391bbd6ee14ab2478c64a1f78b74bd9903cae81", + "checkout": null, + "context": { + "cookiecutter": { + "full_name": "Abel Aoun", + "email": "aoun.abel@gmail.com", + "github_username": "bzah", + "project_name": "xncml", + "project_slug": "xncml", + "project_short_description": "Tools for manipulating NcML (NetCDF Markup Language) files with/for xarray", + "pypi_username": "bzah", + "version": "0.5.0", + "use_pytest": "y", + "use_black": "n", + "use_conda": "n", + "add_pyup_badge": "n", + "make_docs": "y", + "add_translations": "n", + "command_line_interface": "No command-line interface", + "create_author_file": "y", + "open_source_license": "Apache Software License 2.0", + "generated_with_cruft": "y", + "_template": "https://github.com/Ouranosinc/cookiecutter-pypackage.git" + } + }, + "directory": null +} diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..70b8725 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,24 @@ +# http://editorconfig.org + +root = true + +[*] +indent_style = space +indent_size = 4 +trim_trailing_whitespace = true +insert_final_newline = true +charset = utf-8 +end_of_line = lf + +[*.{yaml,yml}] +indent_size = 2 + +[*.bat] +indent_style = tab +end_of_line = crlf + +[LICENSE] +insert_final_newline = false + +[Makefile] +indent_style = tab diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..1116575 --- /dev/null +++ b/.flake8 @@ -0,0 +1,30 @@ +[flake8] +exclude = + .eggs, + .git, + build, + docs, + tests +ignore = + AZ100, + AZ200, + AZ300, + C, + D, + E, + F, + W503 +per-file-ignores = +rst-roles = + doc, + mod, + py:attr, + py:attribute, + py:class, + py:const, + py:data, + py:func, + py:meth, + py:mod, + py:obj, + py:ref diff --git a/.git_archival.txt b/.git_archival.txt deleted file mode 100644 index 95cb3ee..0000000 --- a/.git_archival.txt +++ /dev/null @@ -1 +0,0 @@ -ref-names: $Format:%D$ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c3fbf2f..b67e324 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,6 @@ default_language_version: python: python3 repos: - - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: @@ -13,24 +12,45 @@ repos: - id: check-yaml - id: check-toml - id: double-quote-string-fixer - - - repo: https://github.com/psf/black-pre-commit-mirror - rev: 24.1.1 + - repo: https://github.com/pappasam/toml-sort + rev: v0.23.1 hooks: - - id: black - args: ["--line-length", "100", "--skip-string-normalization"] - - - repo: https://github.com/PyCQA/flake8 + - id: toml-sort-fix + - repo: https://github.com/pre-commit/pygrep-hooks + rev: v1.10.0 + hooks: + - id: python-check-blanket-noqa + - id: python-no-eval + - id: python-no-log-warn + - id: python-use-type-annotations + - id: rst-inline-touching-normal + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.3.7 + hooks: + - id: ruff + args: [ --fix ] + - id: ruff-format + - repo: 
https://github.com/astral-sh/ruff-pre-commit + rev: v0.3.7 + hooks: + - id: ruff - repo: https://github.com/pycqa/flake8 rev: 7.0.0 hooks: - id: flake8 + additional_dependencies: [ 'flake8-alphabetize', 'flake8-rst-docstrings' ] + args: [ '--config=.flake8' ] + - repo: https://github.com/adrienverge/yamllint.git + rev: v1.35.1 hooks: + - id: yamllint + args: [ '--config-file=.yamllint.yaml' ] + - repo: https://github.com/python-jsonschema/check-jsonschema + rev: 0.28.2 + hooks: + - id: check-github-workflows + - id: check-readthedocs + - repo: meta hooks: - id: check-hooks-apply + - id: check-useless-excludes diff --git a/.readthedocs.yml b/.readthedocs.yml index 2edf6bc..2349055 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -13,7 +13,7 @@ build: # Build documentation in the docs/ directory with Sphinx sphinx: - configuration: docs/source/conf.py + configuration: docs/conf.py # Optionally set the version of Python and requirements required to build your docs conda: diff --git a/.yamllint.yaml b/.yamllint.yaml new file mode 100644 index 0000000..2f3b4a3 --- /dev/null +++ b/.yamllint.yaml @@ -0,0 +1,8 @@ +--- + +rules: + document-start: disable + line-length: + max: 120 + level: warning + truthy: disable diff --git a/AUTHORS.rst b/AUTHORS.rst new file mode 100644 index 0000000..27d92bb --- /dev/null +++ b/AUTHORS.rst @@ -0,0 +1,17 @@ +======= +Credits +======= + +Development Lead +---------------- + +* Anderson Banihirwe `@andersy005 <https://github.com/andersy005>`_ +* David Huard `@huard <https://github.com/huard>`_ + +Contributors +------------- + +* Trevor James Smith `@Zeitsperre <https://github.com/Zeitsperre>`_ +* Pascal Bourgault `@aulemahal <https://github.com/aulemahal>`_ +* Francis Charette-Migneault `@fmigneault <https://github.com/fmigneault>`_ +* Abel Aoun `@bzah <https://github.com/bzah>`_ diff --git a/CHANGELOG.md b/CHANGELOG.rst similarity index 65% rename from CHANGELOG.md rename to CHANGELOG.rst index 3ba985f..f32068c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.rst @@ -1,29 +1,31 @@ +Changelog +========= + 0.5.0 (unreleased) -================== +------------------ **Breaking changes** -- Nested group handling: - Before this version, all groups were read, but conflicting variable names in-between groups would shadow data. Now, similarly to xarray ``open_dataset``, ``open_ncml`` accepts an optional ``group`` argument to specify which group should be read. When ``group`` is not specified, it defaults to the root group. Additionally ``group`` can be set to ``'*'`` so that every group is read and the hierarchy is flattened. In the event of conflicting variable/dimension names across groups, the conflicting name will be modified by appending ``'__n'`` where n is incremented. + +- Nested group handling: Before this version, all groups were read, but conflicting variable names in-between groups would shadow data. Now, similarly to xarray ``open_dataset``, ``open_ncml`` accepts an optional ``group`` argument to specify which group should be read. When ``group`` is not specified, it defaults to the root group. Additionally ``group`` can be set to ``'*'`` so that every group is read and the hierarchy is flattened. In the event of conflicting variable/dimension names across groups, the conflicting name will be modified by appending ``'__n'`` where n is incremented. - Enums are no longer transformed into CF flag_values and flag_meanings attributes, instead they are stored in the ``encoding["dtype"].metadata`` of their respective variable.
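The ``group`` semantics summarized in the bullet above can be sketched like so (a minimal sketch; ``nested.ncml`` and the group name are hypothetical, and the group-path syntax is assumed to follow xarray's ``open_dataset``):

```python
# Hypothetical example of the 0.5.0 group handling described in the changelog.
import xncml

ds_root = xncml.open_ncml('nested.ncml')                   # root group only (default)
ds_one = xncml.open_ncml('nested.ncml', group='/group_1')  # one specific group
ds_flat = xncml.open_ncml('nested.ncml', group='*')        # every group, flattened;
# conflicting variable/dimension names across groups get a '__n' suffix.
```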
This is aligned with what is done on xarray v2024.01.0 0.4.0 (2024-01-08) -================== +------------------ - Add support for . By @bzah -- Update XSD schema and dataclasses to latest version from netcdf-java to add support - for unsigned types. By @bzah +- Update XSD schema and dataclasses to latest version from netcdf-java to add support for unsigned types. By @bzah - Add support for scalar variables. By @Bzah - [fix] empty attributes now are parsed into an empty string instead of crashing the parser. By @Bzah 0.3.1 (2023-11-10) -================== +------------------ - Add support for Python 3.12 - Drop support for Python 3.8 0.3 (2023-08-28) -================ +---------------- - Add `add_aggregation` and `add_variable_agg` to `Dataset` class. By @huard - Add `add_scan` to `Dataset` class. By @huard @@ -32,7 +34,7 @@ 0.2 (2023-02-23) -================ +---------------- - Implement `Dataset.rename_dataset_attribute`. By @huard - Allow empty `Dataset` creation. By @huard @@ -41,7 +43,7 @@ 0.1 Initial release (2022-11-24) -================================ +-------------------------------- - Manipulate NcML file: add & remove attributes, variables and dimensions. By @andersy005 - Implement `open_ncml`, which returns an `xarray.Dataset` built from an NcML. Note that diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000..b976dfe --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,258 @@ +.. highlight:: shell + +============ +Contributing +============ + +Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. + +You can contribute in many ways: + +Types of Contributions +---------------------- + +Report Bugs +~~~~~~~~~~~ + +Report bugs at https://github.com/xarray-contrib/xncml/issues. + +If you are reporting a bug, please include: + +* Your operating system name and version. +* Any details about your local setup that might be helpful in troubleshooting. +* Detailed steps to reproduce the bug. + +Fix Bugs +~~~~~~~~ + +Look through the GitHub issues for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to implement it. + +Implement Features +~~~~~~~~~~~~~~~~~~ + +Look through the GitHub issues for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. + +Write Documentation +~~~~~~~~~~~~~~~~~~~ + +xncml could always use more documentation, whether as part of the official xncml docs, in docstrings, or even on the web in blog posts, articles, and such. + +Submit Feedback +~~~~~~~~~~~~~~~ + +The best way to send feedback is to file an issue at https://github.com/xarray-contrib/xncml/issues. + +If you are proposing a feature: + +* Explain in detail how it would work. +* Keep the scope as narrow as possible, to make it easier to implement. +* Remember that this is a volunteer-driven project, and that contributions + are welcome. :) + +Get Started! +------------ + +.. note:: + + If you are new to using GitHub and `git`, please read `this guide `_ first. + +Ready to contribute? Here's how to set up ``xncml`` for local development. + +#. Fork the ``xncml`` repo on GitHub. +#. Clone your fork locally:: + + $ git clone git@github.com:your_name_here/xncml.git + +#. Install your local copy into a development environment. 
Using ``virtualenv`` (``virtualenvwrapper``), you can create a new development environment with:: + + $ python -m pip install flit virtualenvwrapper + $ mkvirtualenv xncml + $ cd xncml/ + $ flit install --symlink + + This installs ``xncml`` in an "editable" state, meaning that changes to the code are immediately seen by the environment. + +#. To ensure a consistent coding style, install the ``pre-commit`` hooks to your local clone:: + + $ pre-commit install + + On commit, ``pre-commit`` will check that ``flake8``, and ``ruff`` checks are passing, perform automatic fixes if possible, and warn of violations that require intervention. If your commit fails the checks initially, simply fix the errors, re-add the files, and re-commit. + + You can also run the hooks manually with:: + + $ pre-commit run -a + + If you want to skip the ``pre-commit`` hooks temporarily, you can pass the ``--no-verify`` flag to `$ git commit`. + +#. Create a branch for local development:: + + $ git checkout -b name-of-your-bugfix-or-feature + + Now you can make your changes locally. + +#. When you're done making changes, we **strongly** suggest running the tests in your environment or with the help of ``tox``:: + + $ python -m pytest + # Or, to run multiple build tests + $ tox + +#. Commit your changes and push your branch to GitHub:: + + $ git add . + $ git commit -m "Your detailed description of your changes." + $ git push origin name-of-your-bugfix-or-feature + + If ``pre-commit`` hooks fail, try re-committing your changes (or, if need be, you can skip them with `$ git commit --no-verify`). + +#. Submit a `Pull Request `_ through the GitHub website. + +#. When pushing your changes to your branch on GitHub, the documentation will automatically be tested to reflect the changes in your Pull Request. This build process can take several minutes at times. If you are actively making changes that affect the documentation and wish to save time, you can compile and test your changes beforehand locally with:: + + # To generate the html and open it in your browser + $ make docs + # To only generate the html + $ make autodoc + $ make -C docs html + # To simply test that the docs pass build checks + $ tox -e docs + +#. Once your Pull Request has been accepted and merged to the ``main`` branch, several automated workflows will be triggered: + + - The ``bump-version.yml`` workflow will automatically bump the patch version when pull requests are pushed to the ``main`` branch on GitHub. **It is not recommended to manually bump the version in your branch when merging (non-release) pull requests (this will cause the version to be bumped twice).** + - `ReadTheDocs` will automatically build the documentation and publish it to the `latest` branch of `xncml` documentation website. + - If your branch is not a fork (ie: you are a maintainer), your branch will be automatically deleted. + + You will have contributed your first changes to ``xncml``! + +Pull Request Guidelines +----------------------- + +Before you submit a pull request, check that it meets these guidelines: + +#. The pull request should include tests and should aim to provide `code coverage `_ for all new lines of code. You can use the ``--cov-report html --cov xncml`` flags during the call to ``pytest`` to generate an HTML report and analyse the current test coverage. + +#. If the pull request adds functionality, the docs should also be updated. Put your new functionality into a function with a docstring, and add the feature to the list in ``README.rst``. + +#. 
The pull request should work for Python 3.9, 3.10, 3.11, and 3.12. Check that the tests pass for all supported Python versions. + +Tips +---- + +To run a subset of tests:: + +$ python -m pytest tests/test_parser.py + +To run specific code style checks:: + + $ black --check xncml tests + $ isort --check xncml tests + $ blackdoc --check xncml docs + $ ruff xncml tests + $ flake8 xncml tests + +To get ``black``, ``isort``, ``blackdoc``, ``ruff``, and ``flake8`` (with plugins ``flake8-alphabetize`` and ``flake8-rst-docstrings``) simply install them with `pip` into your environment. + +Versioning/Tagging +------------------ + +A reminder for the **maintainers** on how to deploy. This section is only relevant when producing a new point release for the package. + +.. warning:: + + It is important to be aware that any changes to files found within the ``src/xncml`` folder (with the exception of ``src/xncml/__init__.py``) will trigger the ``bump-version.yml`` workflow. Be careful not to commit changes to files in this folder when preparing a new release. + +#. Create a new branch from `main` (e.g. `release-0.2.0`). +#. Update the `CHANGELOG.rst` file to change the `Unreleased` section to the current date. +#. Bump the version in your branch to the next version (e.g. `v0.1.0 -> v0.2.0`):: + + $ bump-my-version bump minor # In most cases, we will be releasing a minor version + $ git push + +#. Create a pull request from your branch to `main`. +#. Once the pull request is merged, create a new release on GitHub. On the main branch, run:: + + $ git tag v0.2.0 + $ git push --tags + + This will trigger a GitHub workflow to build the package and upload it to TestPyPI. At the same time, the GitHub workflow will create a draft release on GitHub. Assuming that the workflow passes, the final release can then be published on GitHub by finalizing the draft release. + +#. Once the release is published, the `publish-pypi.yml` workflow will go into an `awaiting approval` mode on GitHub Actions. Only authorized users may approve this workflow (notifications will be sent) to trigger the upload to PyPI. + +.. warning:: + + Uploads to PyPI can **never** be overwritten. If you make a mistake, you will need to bump the version and re-release the package. If the package uploaded to PyPI is broken, you should modify the GitHub release to mark the package as broken, as well as yank the package (mark the version "broken") on PyPI. + +Packaging +--------- + +When a new version has been minted (features have been successfully integrated, test coverage and stability are adequate), maintainers should update the pip-installable package (wheel and source release) on PyPI as well as the binary on conda-forge. + +The simple approach +~~~~~~~~~~~~~~~~~~~ + +The simplest approach to packaging for general support (pip wheels) requires that ``flit`` be installed:: + + $ python -m pip install flit + +From the command line on your Linux distribution, simply run the following from the clone's main dev branch:: + + # To build the packages (sources and wheel) + $ python -m flit build + + # To upload to PyPI + $ python -m flit publish + +The new version, based on the tag currently checked out, will now be available via `pip` (`$ pip install xncml`). 
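A quick way to confirm that the built and installed package carries the expected scm-derived version (a minimal sketch; assumes ``xncml`` is installed in the active environment):

```python
# Sanity check after `flit build` / `flit install`: the version string is
# derived from the latest git tag by setuptools-scm.
from importlib.metadata import version

print(version('xncml'))  # e.g. '0.5.0'
```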
+ +Releasing on conda-forge +~~~~~~~~~~~~~~~~~~~~~~~~ + +Initial Release +^^^^^^^^^^^^^^^ + +Before preparing an initial release on conda-forge, we *strongly* suggest consulting the following links: + * https://conda-forge.org/docs/maintainer/adding_pkgs.html + * https://github.com/conda-forge/staged-recipes + +In order to create a new conda build recipe, to be used when proposing packages to the conda-forge repository, we strongly suggest using the ``grayskull`` tool:: + + $ python -m pip install grayskull + $ grayskull pypi xncml + +For more information on ``grayskull``, please see the following link: https://github.com/conda/grayskull + +Before updating the main conda-forge recipe, we echo the conda-forge documentation and *strongly* suggest performing the following checks: + * Ensure that dependencies and dependency versions correspond with those of the tagged version, with open or pinned versions for the `host` requirements. + * If possible, configure tests within the conda-forge build CI (e.g. `imports: xncml`, `commands: pytest xncml`). + +Subsequent releases +^^^^^^^^^^^^^^^^^^^ + +If the conda-forge feedstock recipe is built from PyPI, then when a new release is published on PyPI, `regro-cf-autotick-bot` will open Pull Requests automatically on the conda-forge feedstock. It is up to the conda-forge feedstock maintainers to verify that the package is building properly before merging the Pull Request to the main branch. + +Building sources for wide support with `manylinux` image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + This section is for building source files that link to or provide links to C/C++ dependencies. + It is not necessary to perform the following when building pure Python packages. + +To ensure the best compatibility across architectures, we suggest building wheels using the `PyPA`'s `manylinux` +docker images (at time of writing, we endorse using `manylinux_2_24_x86_64`). + +With `docker` installed and running, begin by pulling the image:: + + $ sudo docker pull quay.io/pypa/manylinux_2_24_x86_64 + +From the xncml source folder we can enter the docker container, providing access to the `xncml` source files by linking them to the running image:: + + $ sudo docker run --rm -ti -v $(pwd):/xncml -w /xncml quay.io/pypa/manylinux_2_24_x86_64 bash + +Finally, to build the wheel, we run it against the provided Python 3.9 binary:: + + $ /opt/python/cp39-cp39m/bin/python -m build --sdist --wheel + +This will then place two files in `xncml/dist/` ("xncml-1.2.3-py3-none-any.whl" and "xncml-1.2.3.tar.gz"). 
+We can now leave our docker container (`$ exit`) and continue with uploading the files to PyPI:: + + $ twine upload dist/* diff --git a/Makefile b/Makefile index 59937b7..56d3f2e 100644 --- a/Makefile +++ b/Makefile @@ -1,56 +1,99 @@ -.PHONY: help clean clean-pyc clean-build list test test-all coverage docs release sdist +.PHONY: clean clean-build clean-pyc clean-test coverage dist docs help install lint lint/flake8 +.DEFAULT_GOAL := help + +define BROWSER_PYSCRIPT +import os, webbrowser, sys + +from urllib.request import pathname2url + +webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1]))) +endef +export BROWSER_PYSCRIPT + +define PRINT_HELP_PYSCRIPT +import re, sys + +for line in sys.stdin: + match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line) + if match: + target, help = match.groups() + print("%-20s %s" % (target, help)) +endef +export PRINT_HELP_PYSCRIPT + +BROWSER := python -c "$$BROWSER_PYSCRIPT" help: - @echo "clean-build - remove build artifacts" - @echo "clean-pyc - remove Python file artifacts" - @echo "lint - check style with flake8" - @echo "test - run tests quickly with the default Python" - @echo "test-all - run tests on every Python version with tox" - @echo "coverage - check code coverage quickly with the default Python" - @echo "docs - generate Sphinx HTML documentation, including API docs" - @echo "release - package and upload a release" - @echo "sdist - package" - -clean: clean-build clean-pyc - -clean-build: + @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) + +clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts + +clean-build: ## remove build artifacts rm -fr build/ rm -fr dist/ - rm -fr *.egg-info + rm -fr .eggs/ + find . -name '*.egg-info' -exec rm -fr {} + + find . -name '*.egg' -exec rm -f {} + + +clean-docs: ## remove docs artifacts + rm -f docs/apidoc/xncml*.rst + rm -f docs/apidoc/modules.rst + $(MAKE) -C docs clean -clean-pyc: +clean-pyc: ## remove Python file artifacts find . -name '*.pyc' -exec rm -f {} + find . -name '*.pyo' -exec rm -f {} + find . -name '*~' -exec rm -f {} + + find . 
-name '__pycache__' -exec rm -fr {} + + +clean-test: ## remove test and coverage artifacts + rm -fr .tox/ + rm -f .coverage + rm -fr htmlcov/ + rm -fr .pytest_cache + +lint/flake8: ## check style with flake8 + ruff xncml tests + flake8 --config=.flake8 xncml tests -lint: - flake8 xncml test +lint: lint/flake8 ## check style -test: - py.test +test: ## run tests quickly with the default Python + python -m pytest -test-all: +test-all: ## run tests on every Python version with tox tox -coverage: - coverage run --source xncml setup.py test +coverage: ## check code coverage quickly with the default Python + coverage run --source xncml -m pytest coverage report -m coverage html - open htmlcov/index.html + $(BROWSER) htmlcov/index.html -docs: - rm -f docs/xncml.rst - rm -f docs/modules.rst - sphinx-apidoc -o docs/ xncml - $(MAKE) -C docs clean +autodoc: clean-docs ## create sphinx-apidoc files + sphinx-apidoc -o docs/apidoc --private --module-first xncml + +linkcheck: autodoc ## run checks over all external links found throughout the documentation + $(MAKE) -C docs linkcheck + +docs: autodoc ## generate Sphinx HTML documentation, including API docs $(MAKE) -C docs html - open docs/_build/html/index.html +ifndef READTHEDOCS + $(BROWSER) docs/_build/html/index.html +endif -release: clean - python setup.py sdist upload - python setup.py bdist_wheel upload +servedocs: docs ## compile the docs watching for changes + watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D . -sdist: clean - python setup.py sdist - python setup.py bdist_wheel upload +dist: clean ## builds source and wheel package + python -m flit build ls -l dist + +release: dist ## package and upload a release + python -m flit publish + +install: clean ## install the package to the active Python's site-packages + python -m flit install + +dev: clean ## install the package in development (symlinked) mode + python -m flit install --symlink diff --git a/README.md b/README.md deleted file mode 100644 index d4bf153..0000000 --- a/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# xncml - -![Read the Docs](https://img.shields.io/readthedocs/xncml) -![PyPI](https://img.shields.io/pypi/v/xncml) - - -Tools for opening and manipulating NcML (NetCDF Markup Language) files with/for xarray. - -These tools allow you to modify NcML by: - -- Adding or removing global attributes -- Adding or removing variable attributes -- Removing variables and dimensions - -and read NcML files into `xarray.Dataset` objects: - -```python -import xncml -ds = xncml.open_ncml("large_ensemble.ncml") -``` - -See [documentation] for more information. - -## Installation - -xncml can be installed from PyPI with pip: - -```bash -pip install xncml -``` - -[documentation]: https://xncml.readthedocs.io diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..1a4d843 --- /dev/null +++ b/README.rst @@ -0,0 +1,82 @@ +xncml +===== + +|pypi| |ruff| |docs| + +Tools for opening and manipulating NcML (NetCDF Markup Language) files with/for xarray. + +These tools allow you to modify NcML by: + +- Adding or removing global attributes +- Adding or removing variable attributes +- Removing variables and dimensions + +and read NcML files into `xarray.Dataset` objects: + +.. code-block:: python + + import xncml + ds = xncml.open_ncml("large_ensemble.ncml") + +See `doc`_ for more information. + + +Installation +============ + +Stable release +-------------- + +To install xncml, run this command in your terminal: + +.. 
code-block:: console + + $ python -m pip install xncml + +This is the preferred method to install xncml, as it will always install the most recent stable release. + +If you don't have `pip`_ installed, this `Python installation guide`_ can guide +you through the process. + +.. _pip: https://pip.pypa.io +.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ + + +From sources +------------ + +The sources for xncml can be downloaded from the `GitHub repo`_. + +You can either clone the public repository: + +.. code-block:: console + + $ git clone git@github.com:xarray-contrib/xncml/ + +Or download the `tarball`_: + +.. code-block:: console + + $ curl -OJL https://github.com/xarray-contrib/xncml/tarball/main + +Once you have a copy of the source, you can install it with: + +.. code-block:: console + + $ python -m pip install . + +.. _doc: https://xncml.readthedocs.io +.. _GitHub repo: https://github.com/xarray-contrib/xncml/ +.. _tarball: https://github.com/xarray-contrib/xncml/tarball/main + +.. |docs| image:: https://readthedocs.org/projects/xncml/badge/?version=latest + :target: https://xncml.readthedocs.io + :alt: Documentation Status + +.. |pypi| image:: https://img.shields.io/pypi/v/xncml.svg + :target: https://pypi.python.org/pypi/xncml + :alt: Python Package Index Build + +.. |ruff| image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json + :target: https://github.com/astral-sh/ruff + :alt: Ruff diff --git a/ci/doc.yml b/ci/doc.yml index 2464fb5..5501bd7 100644 --- a/ci/doc.yml +++ b/ci/doc.yml @@ -7,13 +7,14 @@ dependencies: - xmltodict - xsdata - xarray - - sphinx==5.3.0 - - nbsphinx==0.8.10 + - sphinx + - nbsphinx - sphinx-copybutton + - sphinx-codeautolink - numpydoc - sphinx-autosummary-accessors - ipython - - pip: - - myst_nb - - myst-parser==0.18.1 - - sphinx_rtd_theme==1.1.1 + - ipykernel + - jupyter_client + - sphinx-rtd-theme + - pandoc diff --git a/ci/environment-dev-3.7.yml b/ci/environment-dev-3.7.yml index 0faae11..b8450c2 100644 --- a/ci/environment-dev-3.7.yml +++ b/ci/environment-dev-3.7.yml @@ -17,11 +17,21 @@ dependencies: - pip - pytest - pytest-cov - - python=3.7 + - python=3.9 - recommonmark - sphinx_rtd_theme - - sphinx>=1.6 - - xarray + - sphinx + - bump-my-version + - watchdog + - flake8-rst-docstrings + - flake8-alphabetize + - flit + - tox + - coverage + - coveralls + - ruff + - pre-commit + - pip: - sphinx_copybutton - xmltodict diff --git a/docs/Makefile b/docs/Makefile index 9b5b604..235c735 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,177 +1,20 @@ -# Makefile for Sphinx documentation +# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = +SPHINXBUILD = python -msphinx +SPHINXPROJ = xncml +SOURCEDIR = . BUILDDIR = _build -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif -# Internal variables. 
-PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - +# Put it first so that "make" without argument is like "make help". help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/complexity.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/complexity.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." 
- @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/complexity" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/complexity" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." +.PHONY: help Makefile -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/authors.rst b/docs/authors.rst new file mode 100644 index 0000000..e122f91 --- /dev/null +++ b/docs/authors.rst @@ -0,0 +1 @@ +.. include:: ../AUTHORS.rst diff --git a/docs/changelog.rst b/docs/changelog.rst new file mode 100644 index 0000000..565b052 --- /dev/null +++ b/docs/changelog.rst @@ -0,0 +1 @@ +.. 
include:: ../CHANGELOG.rst diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..a496e44 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python +# +# xncml documentation build configuration file, created by +# sphinx-quickstart on Fri Jun 9 13:47:02 2017. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another +# directory, add these directories to sys.path here. If the directory is +# relative to the documentation root, use os.path.abspath to make it +# absolute, like shown here. +# +import os +import sys +sys.path.insert(0, os.path.abspath('..')) + +import xncml + +# -- General configuration --------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.viewcode', + 'sphinx.ext.autosummary', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', + 'sphinx.ext.extlinks', + 'numpydoc', + 'IPython.sphinxext.ipython_console_highlighting', + 'IPython.sphinxext.ipython_directive', + 'nbsphinx', + 'sphinx.ext.autosectionlabel', + 'sphinx.ext.todo', + 'sphinx_codeautolink', + 'sphinx_copybutton', +] + +autosectionlabel_prefix_document = True +autosectionlabel_maxdepth = 2 + +# To ensure that underscored fields (e.g. `_field`) are shown in the docs. +autodoc_default_options = { + 'members': True, + 'undoc-members': True, + 'private-members': False, + 'special-members': False, +} + +extlinks = { + 'issue': ('https://github.com/xarray-contrib/xncml/issues/%s', 'GH/%s'), + 'pull': ('https://github.com/xarray-contrib/xncml/pull/%s', 'PR/%s'), + 'user': ('https://github.com/%s', '@%s'), +} + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffixes as a list of strings: +# +# source_suffix = ['.rst', '.md'] +source_suffix = ['.rst'] + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'xncml' +copyright = '2019-2024, University Corporation for Atmospheric Research' +author = 'Anderson Banihirwe' + +# The version info for the project you're documenting, acts as replacement +# for |version| and |release|, also used in various other places throughout +# the built documents. +# +# The short X.Y version. +version = xncml.__version__ +# The full version, including alpha/beta/rc tags. +release = xncml.__version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# These patterns also affect html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. 
+pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a +# theme further. For a list of options available for each theme, see the +# documentation. +# +html_theme_options = {'style_external_links': True} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = [] + + +# -- Options for HTMLHelp output --------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'xncmldoc' + + +# -- Options for LaTeX output ------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'xncml.tex', + 'xncml Documentation', + 'Anderson Banihirwe', 'manual'), +] + + +# -- Options for manual page output ------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'xncml', + 'xncml Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'xncml', + 'xncml Documentation', + author, + 'xncml', + 'Tools for manipulating and opening NCML (NetCDF Markup) files with/for xarray.', + 'Miscellaneous'), +] diff --git a/docs/contributing.rst b/docs/contributing.rst new file mode 100644 index 0000000..e582053 --- /dev/null +++ b/docs/contributing.rst @@ -0,0 +1 @@ +.. include:: ../CONTRIBUTING.rst diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..fa73bc9 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,37 @@ +.. module:: xncml + +Xncml Documentation +=================== + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + readme + tutorial + contributing + authors + changelog + +`xncml` adds NcML support to xarray. It includes utilities to modify NcML documents, +and to open NcML files as `xarray.Dataset`. +For more information on NcML, take a look at the +`tutorials and examples <https://docs.unidata.ucar.edu/netcdf-java/current/userguide/basic_ncml_tutorial.html>`_ +and the `annotated schema <https://docs.unidata.ucar.edu/netcdf-java/current/userguide/annotated_ncml_schema.html>`_. + + +Note the existence of a similar `project <https://github.com/ioos/ncml>`_ +to edit NcML documents, now archived. + +Feedback +======== + +If you encounter any errors or problems with **xncml**, +please open an issue on the main GitHub repository. 
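A minimal sketch of the two workflows this page describes, editing an NcML document and opening one as a dataset (method names follow the project README and changelog; the file paths, attribute, and variable names are hypothetical, and exact signatures may differ):

```python
import xncml

# Edit an NcML document: add a global attribute, then drop a variable.
nc = xncml.Dataset('example.ncml')          # hypothetical input file
nc.add_dataset_attribute(key='editedby', value='xncml')
nc.remove_variable('tas')
nc.to_ncml('edited.ncml')

# Open an NcML description as an xarray.Dataset.
ds = xncml.open_ncml('example.ncml')
```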
+ + +Indices and tables +================== +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/make.bat b/docs/make.bat index 2df9a8c..58f01ad 100644 --- a/docs/make.bat +++ b/docs/make.bat @@ -1,242 +1,36 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set BUILDDIR=_build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . -set I18NSPHINXOPTS=%SPHINXOPTS% . -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% - set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. singlehtml to make a single large HTML file - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. devhelp to make HTML files and a Devhelp project - echo. epub to make an epub - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. text to make text files - echo. man to make manual pages - echo. texinfo to make Texinfo files - echo. gettext to make PO message catalogs - echo. changes to make an overview over all changed/added/deprecated items - echo. xml to make Docutils-native XML files - echo. pseudoxml to make pseudoxml-XML files for display purposes - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - - -%SPHINXBUILD% 2> nul -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end -) - -if "%1" == "singlehtml" ( - %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. 
- goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\complexity.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\complexity.ghc - goto end -) - -if "%1" == "devhelp" ( - %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. - goto end -) - -if "%1" == "epub" ( - %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The epub file is in %BUILDDIR%/epub. - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "latexpdf" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - cd %BUILDDIR%/latex - make all-pdf - cd %BUILDDIR%/.. - echo. - echo.Build finished; the PDF files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "latexpdfja" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - cd %BUILDDIR%/latex - make all-pdf-ja - cd %BUILDDIR%/.. - echo. - echo.Build finished; the PDF files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "text" ( - %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The text files are in %BUILDDIR%/text. - goto end -) - -if "%1" == "man" ( - %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The manual pages are in %BUILDDIR%/man. - goto end -) - -if "%1" == "texinfo" ( - %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. - goto end -) - -if "%1" == "gettext" ( - %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The message catalogs are in %BUILDDIR%/locale. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - if errorlevel 1 exit /b 1 - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - if errorlevel 1 exit /b 1 - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - if errorlevel 1 exit /b 1 - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) - -if "%1" == "xml" ( - %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The XML files are in %BUILDDIR%/xml. - goto end -) - -if "%1" == "pseudoxml" ( - %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. - goto end -) - -:end +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=python -msphinx +) +set SOURCEDIR=. 
+set BUILDDIR=_build +set SPHINXPROJ=xncml + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The Sphinx module was not found. Make sure you have Sphinx installed, + echo.then set the SPHINXBUILD environment variable to point to the full + echo.path of the 'sphinx-build' executable. Alternatively you may add the + echo.Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/docs/readme.rst b/docs/readme.rst new file mode 100644 index 0000000..72a3355 --- /dev/null +++ b/docs/readme.rst @@ -0,0 +1 @@ +.. include:: ../README.rst diff --git a/docs/source/changelog.md b/docs/source/changelog.md deleted file mode 100644 index 0a1daac..0000000 --- a/docs/source/changelog.md +++ /dev/null @@ -1,6 +0,0 @@ -(changelog) = -# Changelog - -```{eval-rst} -.. include:: ../../CHANGELOG.md -``` diff --git a/docs/source/conf.py b/docs/source/conf.py deleted file mode 100644 index 28d89e3..0000000 --- a/docs/source/conf.py +++ /dev/null @@ -1,263 +0,0 @@ -# -*- coding: utf-8 -*- -# -# complexity documentation build configuration file, created by -# sphinx-quickstart on Tue Jul 9 22:26:36 2013. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import datetime -import xncml - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. - -# -- General configuration ----------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.viewcode', - 'sphinx.ext.autosummary', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.extlinks', - 'numpydoc', - 'IPython.sphinxext.ipython_console_highlighting', - 'IPython.sphinxext.ipython_directive', - 'nbsphinx', - 'myst_parser', -] - -extlinks = { - 'issue': ('https://github.com/xarray-contrib/xncml/issues/%s', 'GH#'), - 'pr': ('https://github.com/xarray-contrib/xncml/pull/%s', 'GH#'), -} - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'xncml' -current_year = datetime.datetime.now().year -copyright = u'2019-{}, University Corporation for Atmospheric Research'.format(current_year) -author = u'Anderson Banihirwe' -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = xncml.__version__.split('+')[0] -# The full version, including alpha/beta/rc tags. -release = xncml.__version__ - -# The language for content autogenerated by Sphinx. 
Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'sphinx_rtd_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 
-# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'xncmldoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [('index', 'xncml.tex', u'xncml Documentation', u'Anderson Banihirwe', 'manual')] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [('index', 'xncml', u'xncml Documentation', [author], 1)] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------------ - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - 'index', - 'xncml', - u'xncml Documentation', - author, - 'xncml', - 'One line description of project.', - 'Miscellaneous', - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False diff --git a/docs/source/index.md b/docs/source/index.md deleted file mode 100644 index f4b8bea..0000000 --- a/docs/source/index.md +++ /dev/null @@ -1,37 +0,0 @@ -```{eval-rst} -.. module:: xncml - -``` - -# Xncml Documentation - -`xncml` adds NcML support to xarray. It includes utilities to modify NcML documents, and open NcML files as `xarray.Dataset`. For more information on NcML, take a look at [tutorials and examples](https://docs.unidata.ucar.edu/netcdf-java/current/userguide/basic_ncml_tutorial.html) and the [annotated schema](https://docs.unidata.ucar.edu/netcdf-java/current/userguide/annotated_ncml_schema.html). 
- -## Installing - -`xncml` can be installed from PyPI with pip: - -```bash -pip install xncml -``` - -## Contents - -```{toctree} -:maxdepth: 2 - -tutorial -``` - -## Meta - -- [Changelog](changelog.md) -- [Github](https://github.com/xarray-contrib/xncml/) - -Note the existence of a similar [project](https://github.com/ioos/ncml) to edit NcML documents, now archived. - - -## Feedback - -If you encounter any errors or problems with **xncml**, -please open an Issue at the GitHub main repository. diff --git a/docs/source/tutorial.ipynb b/docs/tutorial.ipynb similarity index 99% rename from docs/source/tutorial.ipynb rename to docs/tutorial.ipynb index 8b03e9b..af861af 100644 --- a/docs/source/tutorial.ipynb +++ b/docs/tutorial.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# xncml Usage\n", + "# xncml usage\n", "\n", "xncml serves two purposes: modifying NcML files, and opening NcML files as an `xarray.Dataset`. " ] diff --git a/pyproject.toml b/pyproject.toml index 6e4a7a3..172b7ed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [build-system] -requires = [ "setuptools>=60", "setuptools-scm>=8.0"] -build-backend = "setuptools.build_meta" +requires = ["flit_core >=3.8,<4"] +build-backend = "flit_core.buildapi" [project] name = "xncml" @@ -10,11 +10,12 @@ authors = [ maintainers = [ {name = "Anderson Banihirwe", email = "abanihi@ucar.edu"}, {name = "David Huard"}, + {name = "Abel Aoun"} ] description = "Tools for manipulating and opening NCML (NetCDF Markup) files with/for xarray" -readme = {file = "README.md", content-type = "text/markdown"} +readme = {file = "README.rst", content-type = "text/x-rst"} requires-python = ">=3.9.0" -keywords = ["xncml", "xarray", "netcdf", "ncml" ] +keywords = ["xncml", "xarray", "netcdf", "ncml"] license = {file = "LICENSE"} classifiers = [ "Development Status :: 4 - Beta", @@ -26,33 +27,195 @@ classifiers = [ "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: Implementation :: CPython", "Topic :: Scientific/Engineering :: Atmospheric Science" ] dynamic = ["version"] dependencies = [ - "xmltodict", - "xsdata", - "xarray", - "cftime", - "netCDF4", - "dask", - "psutil", - "setuptools", # explicitly required for python 3.12 + "xmltodict", + "xsdata", + "xarray", + "cftime", + "netCDF4", + "dask", + "psutil" ] [project.optional-dependencies] dev = [ - "pytest", - "flake8", + # Dev tools and testing + "pip >=23.1.2", + "bump-my-version >=0.18.3", + "watchdog >=3.0.0", + "flake8 >=6.1.0", + "flake8-alphabetize >=0.0.21", + "flake8-rst-docstrings >=0.3.0", + "flit >=3.9.0", + "tox >=4.5.1", + "coverage >=6.2.2,<7.0.0", + "coveralls >=3.3.1", + "pytest >=7.3.1", + "pytest-cov >=4.0.0", + "ruff >=0.2.0", + "pre-commit >=3.3.2" +] +docs = [ + # Documentation and examples + "sphinx", + "sphinx-codeautolink", + "sphinx-copybutton", + "sphinx-rtd-theme >=1.0", + "nbsphinx", + "pandoc", + "ipython", + "ipykernel", + "jupyter_client" ] [project.urls] -Homepage="https://github.com/xarray-contrib/xncml" -Issues="https://github.com/xarray-contrib/xncml/issues" -Changelog="https://github.com/xarray-contrib/xncml/blob/master/CHANGELOG.md" +"Source" = "https://github.com/xarray-contrib/xncml" +"Issue tracker" = "https://github.com/xarray-contrib/xncml/issues" +"Changelog" = "https://github.com/xarray-contrib/xncml/blob/master/CHANGELOG.md" + 
+[tool.bumpversion]
+current_version = "0.5.0"
+commit = true
+commit_args = "--no-verify"
+tag = false
+tag_name = "v{new_version}"
+allow_dirty = false
+parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)(\\-(?P<release>[a-z]+)(\\.(?P<build>\\d+)))?"
+serialize = [
+    "{major}.{minor}.{patch}-{release}.{build}",
+    "{major}.{minor}.{patch}"
+]
+
+[[tool.bumpversion.files]]
+filename = "xncml/__init__.py"
+search = "__version__ = \"{current_version}\""
+replace = "__version__ = \"{new_version}\""
+
+[[tool.bumpversion.files]]
+filename = "tests/test_xncml.py"
+search = "__version__ = \"{current_version}\""
+replace = "__version__ = \"{new_version}\""
+
+[[tool.bumpversion.files]]
+filename = ".cruft.json"
+search = "\"version\": \"{current_version}\""
+replace = "\"version\": \"{new_version}\""
+
+[tool.bumpversion.parts.build]
+independent = false
+
+[tool.bumpversion.parts.release]
+optional_value = "release"
+values = [
+    "dev",
+    "release"
+]
+
+[tool.coverage.run]
+relative_files = true
+include = ["xncml/*"]
+omit = ["tests/*.py"]
+
+[tool.flit.sdist]
+include = [
+    ".zenodo.json",
+    "AUTHORS.rst",
+    "CHANGELOG.rst",
+    "CONTRIBUTING.rst",
+    "LICENSE",
+    "Makefile",
+    "README.rst",
+    "environment-docs.yml",
+    "docs/_static/_images/*.gif",
+    "docs/_static/_images/*.jpg",
+    "docs/_static/_images/*.png",
+    "docs/_static/_images/*.rst",
+    "docs/Makefile",
+    "docs/conf.py",
+    "docs/make.bat",
+    "tests/*.py",
+    "tox.ini",
+    "xncml"
+]
+exclude = [
+    "*.py[co]",
+    "__pycache__",
+    ".coveralls.yml",
+    ".editorconfig",
+    ".flake8",
+    ".gitignore",
+    ".pre-commit-config.yaml",
+    ".readthedocs.yml",
+    ".yamllint.yaml",
+    "docs/_*",
+    "docs/apidoc/modules.rst",
+    "docs/apidoc/xncml*.rst"
+]
+
+[tool.mypy]
+python_version = "3.9"
+show_error_codes = true
+warn_return_any = true
+warn_unused_configs = true
+
+[[tool.mypy.overrides]]
+module = []
+ignore_missing_imports = true
+
+[tool.ruff]
+src = ["src"]
+line-length = 150
+target-version = "py39"
+exclude = [
+    ".eggs",
+    ".git",
+    "build",
+    "docs"
+]
+
+[tool.ruff.format]
+# Enable reformatting of code snippets in docstrings.
+docstring-code-format = true +line-ending = "auto" + +[tool.ruff.lint] +select = [ + "C9", + "D", + "E", + "F", + "W" +] +ignore = [ + "D205", + "D400", + "D401" +] + +[tool.ruff.lint.flake8-bandit] +check-typed-exception = true + +[tool.ruff.lint.isort] +known-first-party = ["xncml"] +case-sensitive = true +detect-same-package = false +lines-after-imports = 1 +no-lines-before = ["future", "standard-library"] + +[tool.ruff.lint.mccabe] +max-complexity = 15 + +[tool.ruff.lint.per-file-ignores] +"xncml/**/__init__.py" = ["F401", "F403"] -[tool.setuptools] -packages = ["xncml"] +[tool.ruff.lint.pycodestyle] +max-doc-length = 180 -[tool.setuptools_scm] -# empty but needed to ensure setuptools_scm is used +[tool.ruff.lint.pydocstyle] +convention = "numpy" diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index d9bcf3a..0000000 --- a/setup.cfg +++ /dev/null @@ -1,22 +0,0 @@ -[wheel] -universal = 1 - - -[flake8] -exclude = docs -ignore = E203,E266,E501,W503,F401,E722,E402,C901 -max-line-length = 100 -max-complexity = 18 -select = B,C,E,F,W,T4,B9 - -[isort] -known_first_party=xncml -known_third_party=numpy,pkg_resources,psutil,pytest,xarray,xmltodict,xsdata -multi_line_output=3 -include_trailing_comma=True -force_grid_wrap=0 -combine_as_imports=True -line_length=100 -skip= - docs/source/conf.py - setup.py diff --git a/src/xncml/__init__.py b/src/xncml/__init__.py new file mode 100644 index 0000000..10eae78 --- /dev/null +++ b/src/xncml/__init__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python +"""Top-level module for xncml.""" + +from .core import Dataset # noqa +from .parser import open_ncml # noqa + +__version__ = "0.5.0" diff --git a/xncml/core.py b/src/xncml/core.py similarity index 61% rename from xncml/core.py rename to src/xncml/core.py index 9c04ebe..10e985e 100644 --- a/xncml/core.py +++ b/src/xncml/core.py @@ -21,7 +21,7 @@ def __init__(self, filepath: str = None, location: str = None): """ Parameters - ----------- + ---------- filepath : str File path to dataset NcML file. If it does not exist, an empty NcML document will be created and this will be the default filename when writing to disk with `to_ncml`. 
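The following hunks only change quoting in `xncml.core.Dataset`, so as a reading aid, here is a minimal, hypothetical sketch of the editing workflow those methods implement (signatures are taken from the diff; the file, attribute, and variable names are invented):

```python
import xncml

# Build an NcML document wrapping a (hypothetical) netCDF file.
nc = xncml.Dataset(filepath="wrapper.ncml", location="data.nc")

# Edit the in-memory NcML tree, then serialize it back to XML.
nc.add_dataset_attribute("title", "Example dataset")  # global attribute
nc.add_variable_attribute("tas", "units", "K")        # variable attribute
nc.to_ncml("wrapper.ncml")                            # write NcML to disk
```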
@@ -35,11 +35,9 @@ def __init__(self, filepath: str = None, location: str = None): else: self.ncroot = OrderedDict() - self.ncroot['netcdf'] = OrderedDict( - {'@xmlns': 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2'} - ) + self.ncroot["netcdf"] = OrderedDict({"@xmlns": "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2"}) if location is not None: - self.ncroot['netcdf']['@location'] = str(location) + self.ncroot["netcdf"]["@location"] = str(location) @classmethod def from_text(cls, xml: str): @@ -53,11 +51,11 @@ def _parse_xml(xml: str) -> dict: """Return dictionary from xml.""" return xmltodict.parse( xml, - force_list=['variable', 'attribute', 'group', 'dimension'], + force_list=["variable", "attribute", "group", "dimension"], process_namespaces=True, namespaces={ - 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2': None, - 'https://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2': None, + "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2": None, + "https://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2": None, }, ) @@ -65,9 +63,7 @@ def __repr__(self): return xmltodict.unparse(self.ncroot, pretty=True) # Aggregations and scans - def add_aggregation( - self, dim_name: str, type_: str, recheck_every: str = None, time_units_change: bool = None - ): + def add_aggregation(self, dim_name: str, type_: str, recheck_every: str = None, time_units_change: bool = None): """Add aggregation. Parameters @@ -84,22 +80,22 @@ def add_aggregation( at = AggregationType(type_) item = OrderedDict( { - '@dimName': dim_name, - '@type': at.value, - '@recheckEvery': recheck_every, - '@timeUnitsChange': time_units_change, + "@dimName": dim_name, + "@type": at.value, + "@recheckEvery": recheck_every, + "@timeUnitsChange": time_units_change, } ) item = preparse(item) - aggregations = self.ncroot['netcdf'].get('aggregation', []) + aggregations = self.ncroot["netcdf"].get("aggregation", []) for agg in aggregations: - if agg['@dimName'] == dim_name: + if agg["@dimName"] == dim_name: agg.update(item) break else: aggregations.append(item) - self.ncroot['netcdf']['aggregation'] = aggregations + self.ncroot["netcdf"]["aggregation"] = aggregations def add_variable_agg(self, dim_name: str, name: str): """Add variable aggregation. @@ -111,18 +107,18 @@ def add_variable_agg(self, dim_name: str, name: str): name : str Variable name. """ - item = OrderedDict({'@name': name}) - aggregations = self.ncroot['netcdf'].get('aggregation') + item = OrderedDict({"@name": name}) + aggregations = self.ncroot["netcdf"].get("aggregation") for agg in aggregations: - if agg['@dimName'] == dim_name: - variables = agg.get('variableAgg', []) + if agg["@dimName"] == dim_name: + variables = agg.get("variableAgg", []) for var in variables: - if var['@name'] == name: + if var["@name"] == name: var.update(item) break else: variables.append(item) - agg['variableAgg'] = variables + agg["variableAgg"] = variables def add_scan( self, @@ -159,30 +155,30 @@ def add_scan( """ item = OrderedDict( { - '@location': location, - '@regExp': reg_exp, - '@suffix': suffix, - '@subdirs': subdirs, - '@olderThan': older_than, - '@dateFormatMark': date_format_mark, - '@enhance': enhance, + "@location": location, + "@regExp": reg_exp, + "@suffix": suffix, + "@subdirs": subdirs, + "@olderThan": older_than, + "@dateFormatMark": date_format_mark, + "@enhance": enhance, } ) item = preparse(item) # An aggregation must exist for the scan to be added. 
- for agg in self.ncroot['netcdf'].get('aggregation'): - if agg['@dimName'] == dim_name: - scan = agg.get('scan', []) + for agg in self.ncroot["netcdf"].get("aggregation"): + if agg["@dimName"] == dim_name: + scan = agg.get("scan", []) scan.append(item) - agg['scan'] = scan + agg["scan"] = scan break else: - raise ValueError(f'No aggregation found for dimension {dim_name}.') + raise ValueError(f"No aggregation found for dimension {dim_name}.") # Variable - def add_variable_attribute(self, variable, key, value, type_='String'): + def add_variable_attribute(self, variable, key, value, type_="String"): """Add variable attribute. Parameters @@ -197,37 +193,37 @@ def add_variable_attribute(self, variable, key, value, type_='String'): String describing attribute type. """ - item = OrderedDict({'@name': key, '@type': type_, '@value': value}) - variables = self.ncroot['netcdf'].get('variable', []) + item = OrderedDict({"@name": key, "@type": type_, "@value": value}) + variables = self.ncroot["netcdf"].get("variable", []) for var in variables: - if var['@name'] == variable: - var_attributes = var.get('attribute', []) + if var["@name"] == variable: + var_attributes = var.get("attribute", []) for attr in var_attributes: - if attr['@name'] == key: + if attr["@name"] == key: attr.update(item) break else: var_attributes.append(item) - var['attribute'] = var_attributes + var["attribute"] = var_attributes break else: - variables.append(OrderedDict({'@name': variable, 'attribute': item})) - self.ncroot['netcdf']['variable'] = variables + variables.append(OrderedDict({"@name": variable, "attribute": item})) + self.ncroot["netcdf"]["variable"] = variables def remove_variable_attribute(self, variable, key): """Remove variable attribute""" - item = OrderedDict({'@name': key, '@type': 'attribute'}) - variables = self.ncroot['netcdf'].get('variable', []) + item = OrderedDict({"@name": key, "@type": "attribute"}) + variables = self.ncroot["netcdf"].get("variable", []) for var in variables: - if var['@name'] == variable: - var['remove'] = item + if var["@name"] == variable: + var["remove"] = item break else: - new_var = OrderedDict({'@name': variable, 'remove': item}) + new_var = OrderedDict({"@name": variable, "remove": item}) variables.append(new_var) - self.ncroot['netcdf']['variable'] = variables + self.ncroot["netcdf"]["variable"] = variables def rename_variable(self, variable, new_name): """Rename variable attribute @@ -240,17 +236,17 @@ def rename_variable(self, variable, new_name): New variable name. """ - item = OrderedDict({'@name': new_name, '@orgName': variable}) - variables = self.ncroot['netcdf'].get('variable', []) + item = OrderedDict({"@name": new_name, "@orgName": variable}) + variables = self.ncroot["netcdf"].get("variable", []) for var in variables: - if var['@name'] == variable: - var['@name'] = new_name - var['@orgName'] = variable + if var["@name"] == variable: + var["@name"] = new_name + var["@orgName"] = variable break else: variables.append(item) - self.ncroot['netcdf']['variable'] = variables + self.ncroot["netcdf"]["variable"] = variables def remove_variable(self, variable): """Remove dataset variable. @@ -260,12 +256,12 @@ def remove_variable(self, variable): key : str Name of the variable to remove. 
""" - item = OrderedDict({'@name': variable, '@type': 'variable'}) - removes = self.ncroot['netcdf'].get('remove', []) + item = OrderedDict({"@name": variable, "@type": "variable"}) + removes = self.ncroot["netcdf"].get("remove", []) if item not in removes: removes.append(item) - self.ncroot['netcdf']['remove'] = removes + self.ncroot["netcdf"]["remove"] = removes def rename_variable_attribute(self, variable, old_name, new_name): """Rename variable attribute. @@ -279,24 +275,24 @@ def rename_variable_attribute(self, variable, old_name, new_name): new_name : str New attribute name. """ - item = OrderedDict({'@name': new_name, '@orgName': old_name}) - variables = self.ncroot['netcdf'].get('variable', []) + item = OrderedDict({"@name": new_name, "@orgName": old_name}) + variables = self.ncroot["netcdf"].get("variable", []) for var in variables: - if var['@name'] == variable: - attrs = var.get('attribute', []) + if var["@name"] == variable: + attrs = var.get("attribute", []) for attr in attrs: - if attr['@name'] == old_name: - attr['@name'] = new_name - attr['@orgName'] = old_name + if attr["@name"] == old_name: + attr["@name"] = new_name + attr["@orgName"] = old_name break else: attrs.append(item) break else: - new_var = OrderedDict({'@name': 'variable', 'attribute': item}) + new_var = OrderedDict({"@name": "variable", "attribute": item}) variables.append(new_var) - self.ncroot['netcdf']['variable'] = variables + self.ncroot["netcdf"]["variable"] = variables # Dimensions @@ -310,23 +306,24 @@ def rename_dimension(self, dimension, new_name): new_name: str New dimension name. """ - item = OrderedDict({'@name': new_name, '@orgName': dimension}) - dimensions = self.ncroot['netcdf'].get('dimension', []) + item = OrderedDict({"@name": new_name, "@orgName": dimension}) + dimensions = self.ncroot["netcdf"].get("dimension", []) for dim in dimensions: - if dim['@name'] == dimension: - dim['@name'] = new_name - dim['@orgName'] = dimension + if dim["@name"] == dimension: + dim["@name"] = new_name + dim["@orgName"] = dimension break else: dimensions.append(item) - self.ncroot['netcdf']['dimensions'] = dimensions + self.ncroot["netcdf"]["dimensions"] = dimensions # Dataset - def add_dataset_attribute(self, key, value, type_='String'): + def add_dataset_attribute(self, key, value, type_="String"): """Add dataset attribute - Parameters + + Parameters ---------- key : str Attribute name. @@ -336,16 +333,16 @@ def add_dataset_attribute(self, key, value, type_='String'): String describing attribute type. """ - item = OrderedDict({'@name': key, '@type': type_, '@value': value}) - attributes = self.ncroot['netcdf'].get('attribute', []) + item = OrderedDict({"@name": key, "@type": type_, "@value": value}) + attributes = self.ncroot["netcdf"].get("attribute", []) for attr in attributes: - if attr['@name'] == key: + if attr["@name"] == key: attr.update(item) break else: attributes.append(item) - self.ncroot['netcdf']['attribute'] = attributes + self.ncroot["netcdf"]["attribute"] = attributes def remove_dataset_attribute(self, key): """Remove dataset attribute. @@ -356,17 +353,15 @@ def remove_dataset_attribute(self, key): Name of the attribute to remove. 
""" - removals = self.ncroot['netcdf'].get('remove', []) - item = OrderedDict({'@name': key, '@type': 'attribute'}) + removals = self.ncroot["netcdf"].get("remove", []) + item = OrderedDict({"@name": key, "@type": "attribute"}) if removals: - removals_keys = [ - removal['@name'] for removal in removals if removal['@type'] == 'attribute' - ] + removals_keys = [removal["@name"] for removal in removals if removal["@type"] == "attribute"] if key not in removals_keys: removals.append(item) else: - self.ncroot['netcdf']['remove'] = [item] + self.ncroot["netcdf"]["remove"] = [item] def rename_dataset_attribute(self, old_name, new_name): """Rename dataset attribute. @@ -378,24 +373,23 @@ def rename_dataset_attribute(self, old_name, new_name): new_name: str New attribute name. """ - - attributes = self.ncroot['netcdf'].get('attribute', None) - item = OrderedDict({'@name': new_name, 'orgName': old_name}) + attributes = self.ncroot["netcdf"].get("attribute", None) + item = OrderedDict({"@name": new_name, "orgName": old_name}) if attributes: if isinstance(attributes, (dict, OrderedDict)): attributes = [attributes] for attr in attributes: - if attr['@name'] == old_name: - attr['@name'] = new_name - attr['@orgName'] = old_name + if attr["@name"] == old_name: + attr["@name"] = new_name + attr["@orgName"] = old_name break else: - self.ncroot['netcdf']['attribute'] = [*attributes, item] + self.ncroot["netcdf"]["attribute"] = [*attributes, item] else: - self.ncroot['netcdf']['attribute'] = item + self.ncroot["netcdf"]["attribute"] = item def to_ncml(self, path=None): """Write NcML file to disk. @@ -412,7 +406,7 @@ def to_ncml(self, path=None): path = str(self.filepath) xml_output = xmltodict.unparse(self.ncroot, pretty=True) - with open(path, 'w') as fd: + with open(path, "w") as fd: fd.write(xml_output) def to_cf_dict(self): @@ -432,18 +426,18 @@ def to_cf_dict(self): http://cf-json.org/specification """ res = OrderedDict() - nc = self.ncroot['netcdf'] + nc = self.ncroot["netcdf"] for key, val in nc.items(): - if key[0] == '@': + if key[0] == "@": res[key] = val - if key == 'dimension': + if key == "dimension": res.update(_dims_to_json(val)) - if key == 'group': + if key == "group": res.update(_groups_to_json(val)) - if key == 'attribute': + if key == "attribute": res.update(_attributes_to_json(val)) - if key == 'variable': + if key == "variable": res.update(_variables_to_json(val)) return res @@ -453,23 +447,23 @@ def _dims_to_json(dims: list) -> dict: """The dimensions object has dimension id:size as its key:value members.""" out = OrderedDict() for dim in dims: - if int(dim['@length']) > 1: - out[dim['@name']] = int(dim['@length']) + if int(dim["@length"]) > 1: + out[dim["@name"]] = int(dim["@length"]) - return {'dimensions': out} + return {"dimensions": out} def _groups_to_json(groups: list) -> dict: out = OrderedDict() for group in groups: - name = group['@name'] + name = group["@name"] out[name] = OrderedDict() - if 'attribute' in group: - out[name].update(_attributes_to_json(group['attribute'])) - if 'group' in group: - out[name].update(_groups_to_json(group['group'])) + if "attribute" in group: + out[name].update(_attributes_to_json(group["attribute"])) + if "group" in group: + out[name].update(_groups_to_json(group["group"])) - return {'groups': out} + return {"groups": out} def _attributes_to_json(attrs: list) -> dict: @@ -477,11 +471,11 @@ def _attributes_to_json(attrs: list) -> dict: out = OrderedDict() for attr in attrs: try: - out[attr['@name']] = _cast(attr) + out[attr["@name"]] = 
_cast(attr) except ValueError as exc: warn(f"Could not cast {attr['@name']}:\n{exc}") - return {'attributes': out} + return {"attributes": out} def _variables_to_json(variables: list) -> dict: @@ -495,39 +489,39 @@ def _variables_to_json(variables: list) -> dict: # Put coordinate variables first for var in variables: if _is_coordinate(var): - out[var['@name']] = None + out[var["@name"]] = None for var in variables: - name = var['@name'] + name = var["@name"] out[name] = OrderedDict() - if '@shape' in var: - out[name]['shape'] = var['@shape'].split(' ') + if "@shape" in var: + out[name]["shape"] = var["@shape"].split(" ") - if '@type' in var: - out[name]['type'] = var['@type'] + if "@type" in var: + out[name]["type"] = var["@type"] - if 'attribute' in var: - out[name].update(_attributes_to_json(var['attribute'])) + if "attribute" in var: + out[name].update(_attributes_to_json(var["attribute"])) - if 'values' in var: - out[name]['data'] = _cast(var) + if "values" in var: + out[name]["data"] = _cast(var) - return {'variables': out} + return {"variables": out} def _cast(obj: dict) -> Any: """Cast attribute value to the appropriate type.""" from xncml.parser import DataType, nctype - value = obj.get('@value') or obj.get('values') - typ = DataType(obj.get('@type', 'String')) + value = obj.get("@value") or obj.get("values") + typ = DataType(obj.get("@type", "String")) if value is not None: if isinstance(value, str): if typ in [DataType.STRING, DataType.STRING_1]: return value - sep = ' ' + sep = " " values = value.split(sep) return list(map(nctype(typ), values)) elif isinstance(value, dict): @@ -538,36 +532,35 @@ def _cast(obj: dict) -> Any: def _is_coordinate(var): """Return True is variable is a coordinate.""" - # Variable is 1D and has same name as dimension - if var.get('@shape', '').split(' ') == [var['@name']]: + if var.get("@shape", "").split(" ") == [var["@name"]]: return True - lat_units = ['degrees_north', 'degreeN', 'degree_N', 'degree_north', 'degreesN', 'degrees_N'] - lon_units = ['degrees_east', 'degreeE', 'degree_E', 'degree_east', 'degreesE', 'degrees_E'] + lat_units = ["degrees_north", "degreeN", "degree_N", "degree_north", "degreesN", "degrees_N"] + lon_units = ["degrees_east", "degreeE", "degree_E", "degree_east", "degreesE", "degrees_E"] names = [ - 'latitude', - 'longitude', - 'time', - 'air_pressure', - 'altitude', - 'depth', - 'geopotential_height', - 'height', - 'height_above_geopotential_datum', - 'height_above_mean_sea_level', - 'height_above_reference_ellipsoid', + "latitude", + "longitude", + "time", + "air_pressure", + "altitude", + "depth", + "geopotential_height", + "height", + "height_above_geopotential_datum", + "height_above_mean_sea_level", + "height_above_reference_ellipsoid", ] - if 'attribute' in var: - attrs = _attributes_to_json(var['attribute']) + if "attribute" in var: + attrs = _attributes_to_json(var["attribute"]) # Check units - if attrs.get('units', '') in lon_units + lat_units: + if attrs.get("units", "") in lon_units + lat_units: return True # Check long_name and standard_name - if attrs.get('long_name', attrs.get('standard_name', '')) in names: + if attrs.get("long_name", attrs.get("standard_name", "")) in names: return True return False @@ -587,9 +580,9 @@ def preparse(obj: dict) -> dict: class AggregationType(Enum): """Type of aggregation.""" - FORECAST_MODEL_RUN_COLLECTION = 'forecastModelRunCollection' - FORECAST_MODEL_RUN_SINGLE_COLLECTION = 'forecastModelRunSingleCollection' - JOIN_EXISTING = 'joinExisting' - JOIN_NEW = 'joinNew' - TILED = 
'tiled' - UNION = 'union' + FORECAST_MODEL_RUN_COLLECTION = "forecastModelRunCollection" + FORECAST_MODEL_RUN_SINGLE_COLLECTION = "forecastModelRunSingleCollection" + JOIN_EXISTING = "joinExisting" + JOIN_NEW = "joinNew" + TILED = "tiled" + UNION = "union" diff --git a/xncml/generated/__init__.py b/src/xncml/generated/__init__.py similarity index 50% rename from xncml/generated/__init__.py rename to src/xncml/generated/__init__.py index 9d4f1f2..17757b2 100644 --- a/xncml/generated/__init__.py +++ b/src/xncml/generated/__init__.py @@ -19,21 +19,21 @@ ) __all__ = [ - 'AggregationType', - 'DataType', - 'ObjectType', - 'Aggregation', - 'Attribute', - 'CacheVariable', - 'Dimension', - 'EnumTypedef', - 'Group', - 'LogicalReduce', - 'LogicalSection', - 'LogicalSlice', - 'Netcdf', - 'PromoteGlobalAttribute', - 'Remove', - 'Values', - 'Variable', + "AggregationType", + "DataType", + "ObjectType", + "Aggregation", + "Attribute", + "CacheVariable", + "Dimension", + "EnumTypedef", + "Group", + "LogicalReduce", + "LogicalSection", + "LogicalSlice", + "Netcdf", + "PromoteGlobalAttribute", + "Remove", + "Values", + "Variable", ] diff --git a/xncml/generated/ncml_2_2.py b/src/xncml/generated/ncml_2_2.py similarity index 53% rename from xncml/generated/ncml_2_2.py rename to src/xncml/generated/ncml_2_2.py index bd8e76c..fc4a182 100644 --- a/xncml/generated/ncml_2_2.py +++ b/src/xncml/generated/ncml_2_2.py @@ -4,58 +4,58 @@ from enum import Enum from typing import List, Optional, Type -__NAMESPACE__ = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' +__NAMESPACE__ = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" class AggregationType(Enum): - FORECAST_MODEL_RUN_COLLECTION = 'forecastModelRunCollection' - FORECAST_MODEL_RUN_SINGLE_COLLECTION = 'forecastModelRunSingleCollection' - JOIN_EXISTING = 'joinExisting' - JOIN_NEW = 'joinNew' - TILED = 'tiled' - UNION = 'union' + FORECAST_MODEL_RUN_COLLECTION = "forecastModelRunCollection" + FORECAST_MODEL_RUN_SINGLE_COLLECTION = "forecastModelRunSingleCollection" + JOIN_EXISTING = "joinExisting" + JOIN_NEW = "joinNew" + TILED = "tiled" + UNION = "union" class DataType(Enum): - BYTE = 'byte' - CHAR = 'char' - SHORT = 'short' - INT = 'int' - LONG = 'long' - FLOAT = 'float' - DOUBLE = 'double' - STRING = 'String' - STRING_1 = 'string' - STRUCTURE = 'Structure' - SEQUENCE = 'Sequence' - OPAQUE = 'opaque' - ENUM1 = 'enum1' - ENUM2 = 'enum2' - ENUM4 = 'enum4' - UBYTE = 'ubyte' - USHORT = 'ushort' - UINT = 'uint' - ULONG = 'ulong' + BYTE = "byte" + CHAR = "char" + SHORT = "short" + INT = "int" + LONG = "long" + FLOAT = "float" + DOUBLE = "double" + STRING = "String" + STRING_1 = "string" + STRUCTURE = "Structure" + SEQUENCE = "Sequence" + OPAQUE = "opaque" + ENUM1 = "enum1" + ENUM2 = "enum2" + ENUM4 = "enum4" + UBYTE = "ubyte" + USHORT = "ushort" + UINT = "uint" + ULONG = "ulong" class ObjectType(Enum): - ATTRIBUTE = 'attribute' - DIMENSION = 'dimension' - VARIABLE = 'variable' - GROUP = 'group' + ATTRIBUTE = "attribute" + DIMENSION = "dimension" + VARIABLE = "variable" + GROUP = "group" @dataclass class CacheVariable: class Meta: - name = 'cacheVariable' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "cacheVariable" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" name: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) @@ -63,48 +63,48 @@ class Meta: @dataclass class Dimension: class Meta: - name = 'dimension' - 
namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "dimension" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" name: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) length: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) is_unlimited: bool = field( default=False, metadata={ - 'name': 'isUnlimited', - 'type': 'Attribute', + "name": "isUnlimited", + "type": "Attribute", }, ) is_variable_length: bool = field( default=False, metadata={ - 'name': 'isVariableLength', - 'type': 'Attribute', + "name": "isVariableLength", + "type": "Attribute", }, ) is_shared: bool = field( default=True, metadata={ - 'name': 'isShared', - 'type': 'Attribute', + "name": "isShared", + "type": "Attribute", }, ) org_name: Optional[str] = field( default=None, metadata={ - 'name': 'orgName', - 'type': 'Attribute', + "name": "orgName", + "type": "Attribute", }, ) @@ -112,15 +112,15 @@ class Meta: @dataclass class LogicalReduce: class Meta: - name = 'logicalReduce' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "logicalReduce" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" dim_names: Optional[str] = field( default=None, metadata={ - 'name': 'dimNames', - 'type': 'Attribute', - 'required': True, + "name": "dimNames", + "type": "Attribute", + "required": True, }, ) @@ -128,14 +128,14 @@ class Meta: @dataclass class LogicalSection: class Meta: - name = 'logicalSection' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "logicalSection" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" section: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) @@ -143,22 +143,22 @@ class Meta: @dataclass class LogicalSlice: class Meta: - name = 'logicalSlice' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "logicalSlice" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" dim_name: Optional[str] = field( default=None, metadata={ - 'name': 'dimName', - 'type': 'Attribute', - 'required': True, + "name": "dimName", + "type": "Attribute", + "required": True, }, ) index: Optional[int] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) @@ -166,21 +166,21 @@ class Meta: @dataclass class PromoteGlobalAttribute: class Meta: - name = 'promoteGlobalAttribute' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "promoteGlobalAttribute" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" name: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) org_name: Optional[str] = field( default=None, metadata={ - 'name': 'orgName', - 'type': 'Attribute', + "name": "orgName", + "type": "Attribute", }, ) @@ -188,46 +188,46 @@ class Meta: @dataclass class Values: class Meta: - name = 'values' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "values" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" start: Optional[float] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) increment: Optional[float] = field( default=None, metadata={ - 'type': 
'Attribute', + "type": "Attribute", }, ) npts: Optional[int] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) separator: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) from_attribute: Optional[str] = field( default=None, metadata={ - 'name': 'fromAttribute', - 'type': 'Attribute', + "name": "fromAttribute", + "type": "Attribute", }, ) content: List[object] = field( default_factory=list, metadata={ - 'type': 'Wildcard', - 'namespace': '##any', - 'mixed': True, + "type": "Wildcard", + "namespace": "##any", + "mixed": True, }, ) @@ -235,54 +235,54 @@ class Meta: @dataclass class Attribute: class Meta: - name = 'attribute' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "attribute" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" name: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) type: DataType = field( default=DataType.STRING, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) value: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) separator: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) org_name: Optional[str] = field( default=None, metadata={ - 'name': 'orgName', - 'type': 'Attribute', + "name": "orgName", + "type": "Attribute", }, ) is_unsigned: Optional[bool] = field( default=None, metadata={ - 'name': 'isUnsigned', - 'type': 'Attribute', + "name": "isUnsigned", + "type": "Attribute", }, ) content: List[object] = field( default_factory=list, metadata={ - 'type': 'Wildcard', - 'namespace': '##any', - 'mixed': True, + "type": "Wildcard", + "namespace": "##any", + "mixed": True, }, ) @@ -290,32 +290,32 @@ class Meta: @dataclass class EnumTypedef: class Meta: - name = 'enumTypedef' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "enumTypedef" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" name: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) type: DataType = field( default=DataType.ENUM1, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) content: List[object] = field( default_factory=list, metadata={ - 'type': 'Wildcard', - 'namespace': '##any', - 'mixed': True, - 'choices': ( + "type": "Wildcard", + "namespace": "##any", + "mixed": True, + "choices": ( { - 'name': 'enum', - 'type': Type['EnumTypedef.EnumType'], + "name": "enum", + "type": Type["EnumTypedef.EnumType"], }, ), }, @@ -326,16 +326,16 @@ class EnumType: key: Optional[int] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) content: List[object] = field( default_factory=list, metadata={ - 'type': 'Wildcard', - 'namespace': '##any', - 'mixed': True, + "type": "Wildcard", + "namespace": "##any", + "mixed": True, }, ) @@ -343,21 +343,21 @@ class EnumType: @dataclass class Remove: class Meta: - name = 'remove' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "remove" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" type: Optional[ObjectType] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) name: Optional[str] = field( default=None, 
metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) @@ -365,84 +365,84 @@ class Meta: @dataclass class Variable: class Meta: - name = 'variable' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "variable" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" attribute: List[Attribute] = field( default_factory=list, metadata={ - 'type': 'Element', + "type": "Element", }, ) values: Optional[Values] = field( default=None, metadata={ - 'type': 'Element', + "type": "Element", }, ) - variable: List['Variable'] = field( + variable: List["Variable"] = field( default_factory=list, metadata={ - 'type': 'Element', + "type": "Element", }, ) logical_section: Optional[LogicalSection] = field( default=None, metadata={ - 'name': 'logicalSection', - 'type': 'Element', + "name": "logicalSection", + "type": "Element", }, ) logical_slice: Optional[LogicalSlice] = field( default=None, metadata={ - 'name': 'logicalSlice', - 'type': 'Element', + "name": "logicalSlice", + "type": "Element", }, ) logical_reduce: Optional[LogicalReduce] = field( default=None, metadata={ - 'name': 'logicalReduce', - 'type': 'Element', + "name": "logicalReduce", + "type": "Element", }, ) remove: List[Remove] = field( default_factory=list, metadata={ - 'type': 'Element', + "type": "Element", }, ) name: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) type: Optional[DataType] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) typedef: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) shape: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) org_name: Optional[str] = field( default=None, metadata={ - 'name': 'orgName', - 'type': 'Attribute', + "name": "orgName", + "type": "Attribute", }, ) @@ -450,37 +450,37 @@ class Meta: @dataclass class Group: class Meta: - name = 'group' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "group" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" choice: List[object] = field( default_factory=list, metadata={ - 'type': 'Elements', - 'choices': ( + "type": "Elements", + "choices": ( { - 'name': 'enumTypedef', - 'type': EnumTypedef, + "name": "enumTypedef", + "type": EnumTypedef, }, { - 'name': 'dimension', - 'type': Dimension, + "name": "dimension", + "type": Dimension, }, { - 'name': 'variable', - 'type': Variable, + "name": "variable", + "type": Variable, }, { - 'name': 'attribute', - 'type': Attribute, + "name": "attribute", + "type": Attribute, }, { - 'name': 'group', - 'type': Type['Group'], + "name": "group", + "type": Type["Group"], }, { - 'name': 'remove', - 'type': Remove, + "name": "remove", + "type": Remove, }, ), }, @@ -488,15 +488,15 @@ class Meta: name: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) org_name: Optional[str] = field( default=None, metadata={ - 'name': 'orgName', - 'type': 'Attribute', + "name": "orgName", + "type": "Attribute", }, ) @@ -504,110 +504,110 @@ class Meta: @dataclass class Aggregation: class Meta: - name = 'aggregation' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "aggregation" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" choice: List[object] = field( 
default_factory=list, metadata={ - 'type': 'Elements', - 'choices': ( + "type": "Elements", + "choices": ( { - 'name': 'group', - 'type': Group, + "name": "group", + "type": Group, }, { - 'name': 'dimension', - 'type': Dimension, + "name": "dimension", + "type": Dimension, }, { - 'name': 'variable', - 'type': Variable, + "name": "variable", + "type": Variable, }, { - 'name': 'attribute', - 'type': Attribute, + "name": "attribute", + "type": Attribute, }, { - 'name': 'remove', - 'type': Remove, + "name": "remove", + "type": Remove, }, ), }, ) - variable_agg: List['Aggregation.VariableAgg'] = field( + variable_agg: List["Aggregation.VariableAgg"] = field( default_factory=list, metadata={ - 'name': 'variableAgg', - 'type': 'Element', + "name": "variableAgg", + "type": "Element", }, ) promote_global_attribute: List[PromoteGlobalAttribute] = field( default_factory=list, metadata={ - 'name': 'promoteGlobalAttribute', - 'type': 'Element', + "name": "promoteGlobalAttribute", + "type": "Element", }, ) cache_variable: List[CacheVariable] = field( default_factory=list, metadata={ - 'name': 'cacheVariable', - 'type': 'Element', + "name": "cacheVariable", + "type": "Element", }, ) - netcdf: List['Netcdf'] = field( + netcdf: List["Netcdf"] = field( default_factory=list, metadata={ - 'type': 'Element', + "type": "Element", }, ) - scan: List['Aggregation.Scan'] = field( + scan: List["Aggregation.Scan"] = field( default_factory=list, metadata={ - 'type': 'Element', + "type": "Element", }, ) - scan_fmrc: List['Aggregation.ScanFmrc'] = field( + scan_fmrc: List["Aggregation.ScanFmrc"] = field( default_factory=list, metadata={ - 'name': 'scanFmrc', - 'type': 'Element', + "name": "scanFmrc", + "type": "Element", }, ) type: Optional[AggregationType] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) dim_name: Optional[str] = field( default=None, metadata={ - 'name': 'dimName', - 'type': 'Attribute', + "name": "dimName", + "type": "Attribute", }, ) recheck_every: Optional[str] = field( default=None, metadata={ - 'name': 'recheckEvery', - 'type': 'Attribute', + "name": "recheckEvery", + "type": "Attribute", }, ) time_units_change: Optional[bool] = field( default=None, metadata={ - 'name': 'timeUnitsChange', - 'type': 'Attribute', + "name": "timeUnitsChange", + "type": "Attribute", }, ) fmrc_definition: Optional[str] = field( default=None, metadata={ - 'name': 'fmrcDefinition', - 'type': 'Attribute', + "name": "fmrcDefinition", + "type": "Attribute", }, ) @@ -616,8 +616,8 @@ class VariableAgg: name: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) @@ -626,47 +626,47 @@ class Scan: location: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) reg_exp: Optional[str] = field( default=None, metadata={ - 'name': 'regExp', - 'type': 'Attribute', + "name": "regExp", + "type": "Attribute", }, ) suffix: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) subdirs: bool = field( default=True, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) older_than: Optional[str] = field( default=None, metadata={ - 'name': 'olderThan', - 'type': 'Attribute', + "name": "olderThan", + "type": "Attribute", }, ) date_format_mark: Optional[str] = field( default=None, metadata={ - 'name': 'dateFormatMark', - 'type': 'Attribute', + "name": 
"dateFormatMark", + "type": "Attribute", }, ) enhance: Optional[bool] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) @@ -675,55 +675,55 @@ class ScanFmrc: location: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', - 'required': True, + "type": "Attribute", + "required": True, }, ) reg_exp: Optional[str] = field( default=None, metadata={ - 'name': 'regExp', - 'type': 'Attribute', + "name": "regExp", + "type": "Attribute", }, ) suffix: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) subdirs: bool = field( default=True, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) older_than: Optional[str] = field( default=None, metadata={ - 'name': 'olderThan', - 'type': 'Attribute', + "name": "olderThan", + "type": "Attribute", }, ) run_date_matcher: Optional[str] = field( default=None, metadata={ - 'name': 'runDateMatcher', - 'type': 'Attribute', + "name": "runDateMatcher", + "type": "Attribute", }, ) forecast_date_matcher: Optional[str] = field( default=None, metadata={ - 'name': 'forecastDateMatcher', - 'type': 'Attribute', + "name": "forecastDateMatcher", + "type": "Attribute", }, ) forecast_offset_matcher: Optional[str] = field( default=None, metadata={ - 'name': 'forecastOffsetMatcher', - 'type': 'Attribute', + "name": "forecastOffsetMatcher", + "type": "Attribute", }, ) @@ -731,61 +731,61 @@ class ScanFmrc: @dataclass class Netcdf: class Meta: - name = 'netcdf' - namespace = 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2' + name = "netcdf" + namespace = "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2" read_metadata: Optional[object] = field( default=None, metadata={ - 'name': 'readMetadata', - 'type': 'Element', + "name": "readMetadata", + "type": "Element", }, ) explicit: Optional[object] = field( default=None, metadata={ - 'type': 'Element', + "type": "Element", }, ) iosp_param: Optional[object] = field( default=None, metadata={ - 'name': 'iospParam', - 'type': 'Element', + "name": "iospParam", + "type": "Element", }, ) choice: List[object] = field( default_factory=list, metadata={ - 'type': 'Elements', - 'choices': ( + "type": "Elements", + "choices": ( { - 'name': 'enumTypedef', - 'type': EnumTypedef, + "name": "enumTypedef", + "type": EnumTypedef, }, { - 'name': 'group', - 'type': Group, + "name": "group", + "type": Group, }, { - 'name': 'dimension', - 'type': Dimension, + "name": "dimension", + "type": Dimension, }, { - 'name': 'variable', - 'type': Variable, + "name": "variable", + "type": Variable, }, { - 'name': 'attribute', - 'type': Attribute, + "name": "attribute", + "type": Attribute, }, { - 'name': 'remove', - 'type': Remove, + "name": "remove", + "type": Remove, }, { - 'name': 'aggregation', - 'type': Aggregation, + "name": "aggregation", + "type": Aggregation, }, ), }, @@ -793,70 +793,70 @@ class Meta: location: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) id: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) title: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) enhance: Optional[str] = field( default=None, metadata={ - 'type': 'Attribute', + "type": "Attribute", }, ) add_records: Optional[bool] = field( default=None, metadata={ - 'name': 'addRecords', - 'type': 'Attribute', + "name": "addRecords", + "type": "Attribute", }, ) iosp: Optional[str] = field( default=None, metadata={ - 'type': 
'Attribute',
+            "type": "Attribute",
         },
     )
     iosp_param_attribute: Optional[str] = field(
         default=None,
         metadata={
-            'name': 'iospParam',
-            'type': 'Attribute',
+            "name": "iospParam",
+            "type": "Attribute",
         },
     )
     buffer_size: Optional[int] = field(
         default=None,
         metadata={
-            'name': 'bufferSize',
-            'type': 'Attribute',
+            "name": "bufferSize",
+            "type": "Attribute",
         },
     )
     ncoords: Optional[str] = field(
         default=None,
         metadata={
-            'type': 'Attribute',
+            "type": "Attribute",
         },
     )
     coord_value: Optional[str] = field(
         default=None,
         metadata={
-            'name': 'coordValue',
-            'type': 'Attribute',
+            "name": "coordValue",
+            "type": "Attribute",
         },
     )
     section: Optional[str] = field(
         default=None,
         metadata={
-            'type': 'Attribute',
+            "type": "Attribute",
         },
     )
diff --git a/xncml/parser.py b/src/xncml/parser.py
similarity index 86%
rename from xncml/parser.py
rename to src/xncml/parser.py
index 274b916..7d67130 100644
--- a/xncml/parser.py
+++ b/src/xncml/parser.py
@@ -60,12 +60,12 @@
 if TYPE_CHECKING:
     from collections.abc import Iterator
 
-__author__ = 'David Huard, Abel Aoun'
-__date__ = 'July 2022'
-__contact__ = 'huard.david@ouranos.ca'
+__author__ = "David Huard, Abel Aoun"
+__date__ = "July 2022"
+__contact__ = "huard.david@ouranos.ca"
 
-FLATTEN_GROUPS = '*'
-ROOT_GROUP = '/'
+FLATTEN_GROUPS = "*"
+ROOT_GROUP = "/"
 
 
 def parse(path: Path) -> Netcdf:
@@ -111,9 +111,7 @@ def open_ncml(ncml: str | Path, group: str = ROOT_GROUP) -> xr.Dataset:
     return read_netcdf(xr.Dataset(), xr.Dataset(), obj, ncml, group)
 
 
-def read_netcdf(
-    target: xr.Dataset, ref: xr.Dataset, obj: Netcdf, ncml: Path, group: str
-) -> xr.Dataset:
+def read_netcdf(target: xr.Dataset, ref: xr.Dataset, obj: Netcdf, ncml: Path, group: str) -> xr.Dataset:
     """
     Return content of <netcdf> element.
@@ -152,8 +150,8 @@ def read_netcdf(
     if group == FLATTEN_GROUPS:
         target = _flatten_groups(target, ref, obj)
     else:
-        if not group.startswith('/'):
-            group = f'/{group}'
+        if not group.startswith("/"):
+            group = f"/{group}"
         target = read_group(target, ref, obj, groups_to_read=[group])
     return target
 
@@ -191,7 +189,7 @@ def read_aggregation(target: xr.Dataset, obj: Aggregation, ncml: Path) -> xr.Dat
     for item in obj.netcdf:
         # Open dataset defined in <netcdf>'s `location` attribute
         tar = read_netcdf(xr.Dataset(), ref=xr.Dataset(), obj=item, ncml=ncml, group=ROOT_GROUP)
-        closers.append(getattr(tar, '_close'))
+        closers.append(getattr(tar, "_close"))
 
     # Select variables
     if names:
@@ -208,7 +206,7 @@ def read_aggregation(target: xr.Dataset, obj: Aggregation, ncml: Path) -> xr.Dat
     for item in obj.scan:
         dss = read_scan(item, ncml)
         datasets.extend([ds.chunk() for ds in dss])
-        closers.extend([getattr(ds, '_close') for ds in dss])
+        closers.extend([getattr(ds, "_close") for ds in dss])
 
     # Need to decode time variable
     if obj.time_units_change:
@@ -228,7 +226,7 @@ def read_aggregation(target: xr.Dataset, obj: Aggregation, ncml: Path) -> xr.Dat
         raise NotImplementedError
 
     agg = read_group(agg, ref=None, obj=obj, groups_to_read=[ROOT_GROUP])
-    out = target.merge(agg, combine_attrs='no_conflicts')
+    out = target.merge(agg, combine_attrs="no_conflicts")
     out.set_close(partial(_multi_file_closer, closers))
     return out
 
@@ -252,9 +250,9 @@ def read_ds(obj: Netcdf, ncml: Path) -> xr.Dataset:
     if obj.location:
         try:
             # Python >= 3.9
-            location = obj.location.removeprefix('file:')
+            location = obj.location.removeprefix("file:")
         except AttributeError:
-            location = obj.location.strip('file:')
+            # Python < 3.9 fallback: str.strip removes characters, not a prefix
+            location = obj.location[len("file:"):] if obj.location.startswith("file:") else obj.location
 
         if not Path(location).is_absolute():
             location = ncml.parent / location
@@ -263,7 
+261,7 @@ def read_ds(obj: Netcdf, ncml: Path) -> xr.Dataset: def _get_leaves(group: Netcdf | Group, parent: str | None = None) -> Iterator[str]: group_children = [child for child in group.choice if isinstance(child, Group)] - current_path = ROOT_GROUP if parent is None else f'{parent}{group.name}/' + current_path = ROOT_GROUP if parent is None else f"{parent}{group.name}/" if len(group_children) == 0: yield current_path for child in group_children: @@ -333,7 +331,7 @@ def read_group( target, ref, item, - parent_group_path=f'{parent_group_path}{item.name}/', + parent_group_path=f"{parent_group_path}{item.name}/", dims=dims, groups_to_read=groups_to_read, ) @@ -373,21 +371,21 @@ def read_scan(obj: Aggregation.Scan, ncml: Path) -> list[xr.Dataset]: if not path.is_absolute(): path = ncml.parent / path - files = list(path.rglob('*') if obj.subdirs else path.glob('*')) + files = list(path.rglob("*") if obj.subdirs else path.glob("*")) if not files: - raise ValueError(f'No files found in {path}') + raise ValueError(f"No files found in {path}") fns = map(str, files) if obj.reg_exp: pat = re.compile(obj.reg_exp) files = list(filter(pat.match, fns)) elif obj.suffix: - pat = '*' + obj.suffix + pat = "*" + obj.suffix files = glob.fnmatch.filter(fns, pat) if not files: - raise ValueError('regular expression or suffix matches no file.') + raise ValueError("regular expression or suffix matches no file.") files.sort() @@ -422,7 +420,7 @@ def read_coord_value(nc: Netcdf, agg: Aggregation, dtypes: list = ()): if agg.type == AggregationType.JOIN_NEW: coord = val elif agg.type == AggregationType.JOIN_EXISTING: - coord = val.replace(',', ' ').split() + coord = val.replace(",", " ").split() else: raise NotImplementedError @@ -431,7 +429,7 @@ def read_coord_value(nc: Netcdf, agg: Aggregation, dtypes: list = ()): typ = dtypes[0] else: try: - dt.datetime.strptime(coord, '%Y-%m-%d %H:%M:%SZ') + dt.datetime.strptime(coord, "%Y-%m-%d %H:%M:%SZ") typ = str except ValueError: typ = np.float64 @@ -509,16 +507,12 @@ def read_variable( var_name = obj.name # Read existing data or create empty DataArray - if (existing_var := target.get(var_name)) is not None and existing_var.attrs.get( - 'group_path' - ) in [None, group_path]: + if (existing_var := target.get(var_name)) is not None and existing_var.attrs.get("group_path") in [None, group_path]: out = xr.as_variable(target[var_name]) if obj.type: out = out.astype(nctype(obj.type)) ref_var = None - elif (existing_var := ref.get(var_name)) is not None and existing_var.attrs.get( - 'group_path' - ) in [None, group_path]: + elif (existing_var := ref.get(var_name)) is not None and existing_var.attrs.get("group_path") in [None, group_path]: out = xr.as_variable(ref[var_name]) if obj.type: out = out.astype(nctype(obj.type)) @@ -526,35 +520,32 @@ def read_variable( elif obj.shape: var_dims = [] shape = [] - for dim in obj.shape.split(' '): + for dim in obj.shape.split(" "): if dimensions.get(dim) is None: - err = ( - f"Unknown dimension '{dim}'." - ' Make sure it is declared before being used in the NCML.' - ) + err = f"Unknown dimension '{dim}'." " Make sure it is declared before being used in the NCML." 
                raise ValueError(err)
             shape.append(dimensions[dim][-1].length)
             if (dim_count := len(dimensions[dim])) > 1:
-                dim = f'{dim}__{dim_count - 1}'
+                dim = f"{dim}__{dim_count - 1}"
             var_dims.append(dim)
         out = xr.Variable(data=np.empty(shape, dtype=nctype(obj.type)), dims=var_dims)
-    elif obj.shape == '':
+    elif obj.shape == "":
         out = build_scalar_variable(var_name=var_name, values_tag=obj.values, var_type=obj.type)
     else:
-        error_msg = f'Could not build variable `{var_name }`.'
+        error_msg = f"Could not build variable `{var_name}`."
         raise ValueError(error_msg)
 
     # Set variable attributes
     for item in obj.attribute:
         read_attribute(out, item, ref=ref_var)
-    out.attrs['group_path'] = group_path
+    out.attrs["group_path"] = group_path
 
     # Remove attributes or dimensions
     for item in obj.remove:
         read_remove(out, item)
 
     # Read values for arrays (already done for a scalar)
-    if obj.values and obj.shape != '':
+    if obj.values and obj.shape != "":
         data = read_values(var_name, out.size, obj.values)
         data = out.dtype.type(data)
         out = xr.Variable(
@@ -574,21 +565,17 @@
 
     if obj.typedef in enums.keys():
         dtype = out.dtype
-        new_dtype = np.dtype(dtype, metadata={'enum': enums[obj.typedef], 'enum_name': obj.typedef})
-        out.encoding['dtype'] = new_dtype
+        new_dtype = np.dtype(dtype, metadata={"enum": enums[obj.typedef], "enum_name": obj.typedef})
+        out.encoding["dtype"] = new_dtype
         out = out.astype(new_dtype)
     elif obj.typedef is not None:
         raise NotImplementedError
     import re
 
-    reg = re.compile(f'^{var_name}__|{var_name}')
-    similar_vars_but_diff_path = [
-        v
-        for v in target.data_vars
-        if reg.match(v) and target[v].attrs.get('group_path') not in [None, group_path]
-    ]
+    reg = re.compile(f"^{var_name}__|{var_name}")
+    similar_vars_but_diff_path = [v for v in target.data_vars if reg.match(v) and target[v].attrs.get("group_path") not in [None, group_path]]
     if len(similar_vars_but_diff_path) > 0:
-        var_name = f'{var_name}__{len(similar_vars_but_diff_path)}'
+        var_name = f"{var_name}__{len(similar_vars_but_diff_path)}"
     target[var_name] = out
     return target
 
@@ -611,34 +598,24 @@ def read_values(var_name: str, expected_size: int, values_tag: Values) -> list:
     A list filled with values from <values> element.
     """
     if values_tag.from_attribute is not None:
-        error_msg = (
-            'xncml cannot yet fetch values from a global or a '
-            ' variable attribute using <values>, here on variable'
-            f' {var_name}.'
-        )
+        error_msg = f"xncml cannot yet fetch values from a global or a variable attribute using <values>, here on variable {var_name}."
         raise NotImplementedError(error_msg)
 
     if values_tag.start is not None and values_tag.increment is not None:
         number_of_values = int(values_tag.npts or expected_size)
         return values_tag.start + np.arange(number_of_values) * values_tag.increment
 
     if not isinstance(values_tag.content, list):
-        error_msg = f'Unsupported format of the <values> tag from variable {var_name}.'
+        error_msg = f"Unsupported format of the <values> tag from variable {var_name}."
         raise NotImplementedError(error_msg)
 
     if len(values_tag.content) == 0:
-        error_msg = (
-            f'No values found for variable {var_name}, but a {expected_size}'
-            ' values were expected.'
-        )
+        error_msg = f"No values found for variable {var_name}, but {expected_size} values were expected."
         raise ValueError(error_msg)
 
     if not isinstance(values_tag.content[0], str):
-        error_msg = f'Unsupported format of the <values> tag from variable {var_name}.'
+        error_msg = f"Unsupported format of the <values> tag from variable {var_name}."
         raise NotImplementedError(error_msg)
 
-    separator = values_tag.separator or ' '
+    separator = values_tag.separator or " "
     data = values_tag.content[0].split(separator)
     if len(data) > expected_size:
-        error_msg = (
-            f'The expected size for variable {var_name} was {expected_size},'
-            f' but {len(data)} values were found in its <values> tag.'
-        )
+        error_msg = f"The expected size for variable {var_name} was {expected_size}, but {len(data)} values were found in its <values> tag."
         raise ValueError(error_msg)
     return data
 
@@ -668,19 +645,16 @@ def build_scalar_variable(var_name: str, values_tag: Values, var_type: str) -> x
     if values_tag is None:
         default_value = nctype(var_type)()
         warn(
-            f'The scalar variable {var_name} has no values set within'
-            f' <values>. A default value of {default_value} is set'
-            ' to preserve the type.'
+            f"The scalar variable {var_name} has no values set within"
+            f" <values>. A default value of {default_value} is set"
+            " to preserve the type."
         )
         return xr.Variable(data=default_value, dims=())
     values_content = read_values(var_name, expected_size=1, values_tag=values_tag)
     if len(values_content) == 1:
         return xr.Variable(data=np.array(values_content[0], dtype=nctype(var_type))[()], dims=())
     if len(values_content) > 1:
-        error_msg = (
-            f'Multiple values found for variable {var_name} but its'
-            ' shape is "" thus a single scalar is expected within its <values> tag.'
-        )
+        error_msg = f'Multiple values found for variable {var_name} but its shape is "" thus a single scalar is expected within its <values> tag.'
         raise ValueError(error_msg)
 
 
@@ -699,7 +673,6 @@ def read_remove(target: xr.Dataset | xr.Variable, obj: Remove) -> xr.Dataset:
     xr.Dataset or xr.Variable
         Dataset with attribute, variable or dimension removed, or variable with attribute removed.
     """
-
     if obj.type == ObjectType.ATTRIBUTE:
         target.attrs.pop(obj.name)
     elif obj.type == ObjectType.VARIABLE:
@@ -746,7 +719,6 @@ def read_dimension(obj: Dimension) -> Dimension:
 
 def nctype(typ: DataType) -> type:
     """Return Python type corresponding to the NcML DataType of the object."""
-
     if typ in [DataType.STRING, DataType.STRING_1]:
         return str
     elif typ in [DataType.BYTE, DataType.ENUM1]:
@@ -780,10 +752,10 @@
     if obj.type in [DataType.STRING, DataType.STRING_1]:
         return value
 
-    sep = obj.separator or ' '
+    sep = obj.separator or " "
     values = value.split(sep)
     return tuple(map(nctype(obj.type), values))
-    return ''
+    return ""
 
 
 def filter_by_class(iterable, klass):
diff --git a/tests/test_core.py b/tests/test_core.py
index 4677d0e..23909c5 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -8,7 +8,7 @@
 import xncml
 
 here = os.path.abspath(os.path.dirname(__file__))
-input_file = Path(here) / 'data' / 'exercise1.ncml'
+input_file = Path(here) / "data" / "exercise1.ncml"
 
 
 def test_ncml_dataset_constructor():
@@ -16,78 +16,76 @@ def test_ncml_dataset_constructor():
     nc = xncml.Dataset(input_file)
     expected = OrderedDict(
         [
-            ('@name', 'T'),
-            ('@shape', 'time lat lon'),
-            ('@type', 'double'),
+            ("@name", "T"),
+            ("@shape", "time lat lon"),
+            ("@type", "double"),
             (
-                'attribute',
+                "attribute",
                 [
                     OrderedDict(
                         [
-                            ('@name', 'long_name'),
-                            ('@type', 'String'),
-                            ('@value', 'surface temperature'),
+                            ("@name", "long_name"),
+                            ("@type", "String"),
+                            ("@value", "surface temperature"),
                         ]
                     ),
-                    OrderedDict([('@name', 'units'), ('@type', 'String'), ('@value', 'C')]),
+                    OrderedDict([("@name", "units"), ("@type", "String"), ("@value", "C")]),
                 ],
             ),
         ]
     )
-    res = nc.ncroot['netcdf']['variable'][1]
+    res = nc.ncroot["netcdf"]["variable"][1]
     assert res == 
expected # Test with non-existing NcML - nc = xncml.Dataset('example.ncml') - assert '@xmlns' in nc.ncroot['netcdf'] + nc = xncml.Dataset("example.ncml") + assert "@xmlns" in nc.ncroot["netcdf"] # Test with non-exising NcML and location - nc = xncml.Dataset('example.ncml', location=Path(here) / 'data' / 'nc' / 'example1.nc') - assert 'example1.nc' in nc.ncroot['netcdf']['@location'] + nc = xncml.Dataset("example.ncml", location=Path(here) / "data" / "nc" / "example1.nc") + assert "example1.nc" in nc.ncroot["netcdf"]["@location"] # Test with namespace - nc = xncml.Dataset(Path(here) / 'data' / 'testReadHttps.xml') - assert nc.ncroot['netcdf']['attribute'][0]['@value'] == 'Example Data' + nc = xncml.Dataset(Path(here) / "data" / "testReadHttps.xml") + assert nc.ncroot["netcdf"]["attribute"][0]["@value"] == "Example Data" def test_add_variable_attribute(): nc = xncml.Dataset(input_file) - nc.add_variable_attribute(variable='T', key='container', value='ndarray') + nc.add_variable_attribute(variable="T", key="container", value="ndarray") expected = OrderedDict( [ - ('@name', 'T'), - ('@shape', 'time lat lon'), - ('@type', 'double'), + ("@name", "T"), + ("@shape", "time lat lon"), + ("@type", "double"), ( - 'attribute', + "attribute", [ OrderedDict( [ - ('@name', 'long_name'), - ('@type', 'String'), - ('@value', 'surface temperature'), + ("@name", "long_name"), + ("@type", "String"), + ("@value", "surface temperature"), ] ), - OrderedDict([('@name', 'units'), ('@type', 'String'), ('@value', 'C')]), - OrderedDict( - [('@name', 'container'), ('@type', 'String'), ('@value', 'ndarray')] - ), + OrderedDict([("@name", "units"), ("@type", "String"), ("@value", "C")]), + OrderedDict([("@name", "container"), ("@type", "String"), ("@value", "ndarray")]), ], ), ] ) - res = nc.ncroot['netcdf']['variable'][1] + res = nc.ncroot["netcdf"]["variable"][1] assert res == expected - nc.add_variable_attribute(variable='Tasmax', key='units', value='kelvin') - res = nc.ncroot['netcdf']['variable'][5] + nc.add_variable_attribute(variable="Tasmax", key="units", value="kelvin") + res = nc.ncroot["netcdf"]["variable"][5] expected = OrderedDict( [ - ('@name', 'Tasmax'), + ("@name", "Tasmax"), ( - 'attribute', - OrderedDict([('@name', 'units'), ('@type', 'String'), ('@value', 'kelvin')]), + "attribute", + OrderedDict([("@name", "units"), ("@type", "String"), ("@value", "kelvin")]), ), ] ) @@ -95,45 +93,43 @@ def test_add_variable_attribute(): @pytest.mark.parametrize( - 'variable,key,expected, var_index', + "variable,key,expected, var_index", [ ( - 'T', - 'units', + "T", + "units", OrderedDict( [ - ('@name', 'T'), - ('@shape', 'time lat lon'), - ('@type', 'double'), + ("@name", "T"), + ("@shape", "time lat lon"), + ("@type", "double"), ( - 'attribute', + "attribute", [ OrderedDict( [ - ('@name', 'long_name'), - ('@type', 'String'), - ('@value', 'surface temperature'), + ("@name", "long_name"), + ("@type", "String"), + ("@value", "surface temperature"), ] ), - OrderedDict([('@name', 'units'), ('@type', 'String'), ('@value', 'C')]), + OrderedDict([("@name", "units"), ("@type", "String"), ("@value", "C")]), ], ), - ('remove', OrderedDict([('@name', 'units'), ('@type', 'attribute')])), + ("remove", OrderedDict([("@name", "units"), ("@type", "attribute")])), ] ), 1, ), ( - 'Tidi', - 'unwantedvaribleAttribute', + "Tidi", + "unwantedvaribleAttribute", OrderedDict( [ - ('@name', 'Tidi'), + ("@name", "Tidi"), ( - 'remove', - OrderedDict( - [('@name', 'unwantedvaribleAttribute'), ('@type', 'attribute')] - ), + "remove", + 
OrderedDict([("@name", "unwantedvaribleAttribute"), ("@type", "attribute")]), ), ] ), @@ -144,39 +140,37 @@ def test_add_variable_attribute(): def test_remove_variable_attribute(variable, key, expected, var_index): nc = xncml.Dataset(input_file) nc.remove_variable_attribute(variable=variable, key=key) - res = nc.ncroot['netcdf']['variable'][var_index] + res = nc.ncroot["netcdf"]["variable"][var_index] assert res == expected def test_rename_variable(): # Rename existing variable nc = xncml.Dataset(input_file) - nc.rename_variable('lat', 'latitude') - res = nc.ncroot['netcdf']['variable'][2] + nc.rename_variable("lat", "latitude") + res = nc.ncroot["netcdf"]["variable"][2] expected = OrderedDict( [ - ('@name', 'latitude'), - ('@shape', 'lat'), - ('@type', 'float'), + ("@name", "latitude"), + ("@shape", "lat"), + ("@type", "float"), ( - 'attribute', + "attribute", [ - OrderedDict( - [('@name', 'units'), ('@type', 'String'), ('@value', 'degrees_north')] - ), + OrderedDict([("@name", "units"), ("@type", "String"), ("@value", "degrees_north")]), ], ), - ('values', '41.0 40.0 39.0'), - ('@orgName', 'lat'), + ("values", "41.0 40.0 39.0"), + ("@orgName", "lat"), ] ) assert expected == res # Rename non-existing variable - nc.rename_variable('Temp', 'Temperature') - res = nc.ncroot['netcdf']['variable'][-1] - assert res == OrderedDict([('@name', 'Temperature'), ('@orgName', 'Temp')]) + nc.rename_variable("Temp", "Temperature") + res = nc.ncroot["netcdf"]["variable"][-1] + assert res == OrderedDict([("@name", "Temperature"), ("@orgName", "Temp")]) def test_rename_variable_attribute(): @@ -185,131 +179,127 @@ def test_rename_variable_attribute(): expected = [ OrderedDict( [ - ('@name', 'Units'), - ('@type', 'String'), - ('@value', 'degrees_north'), - ('@orgName', 'units'), + ("@name", "Units"), + ("@type", "String"), + ("@value", "degrees_north"), + ("@orgName", "units"), ] ) ] - nc.rename_variable_attribute('lat', 'units', 'Units') - res = nc.ncroot['netcdf']['variable'][2]['attribute'] + nc.rename_variable_attribute("lat", "units", "Units") + res = nc.ncroot["netcdf"]["variable"][2]["attribute"] assert res == expected # Rename non-existing attribute (could be in netCDF file but not in NcML) - nc.rename_variable_attribute('lat', 'foo', 'bar') - res = nc.ncroot['netcdf']['variable'][2]['attribute'] - assert {'@name': 'bar', '@orgName': 'foo'} in res + nc.rename_variable_attribute("lat", "foo", "bar") + res = nc.ncroot["netcdf"]["variable"][2]["attribute"] + assert {"@name": "bar", "@orgName": "foo"} in res def test_rename_dimension(): nc = xncml.Dataset(input_file) - nc.rename_dimension('time', 'Time') - res = nc.ncroot['netcdf']['dimension'] + nc.rename_dimension("time", "Time") + res = nc.ncroot["netcdf"]["dimension"] expected = [ - OrderedDict( - [('@name', 'Time'), ('@length', '2'), ('@isUnlimited', 'true'), ('@orgName', 'time')] - ), - OrderedDict([('@name', 'lat'), ('@length', '3')]), - OrderedDict([('@name', 'lon'), ('@length', '4')]), + OrderedDict([("@name", "Time"), ("@length", "2"), ("@isUnlimited", "true"), ("@orgName", "time")]), + OrderedDict([("@name", "lat"), ("@length", "3")]), + OrderedDict([("@name", "lon"), ("@length", "4")]), ] assert res == expected # With non-existing dimension - nc.rename_dimension('time_bound', 'time_bounds') - assert '@orgName' in res[-1] + nc.rename_dimension("time_bound", "time_bounds") + assert "@orgName" in res[-1] def test_add_dataset_attribute(): nc = xncml.Dataset(input_file) - nc.add_dataset_attribute(key='editedby', value='foo') - 
nc.add_dataset_attribute(key='editedby', value='bar') + nc.add_dataset_attribute(key="editedby", value="foo") + nc.add_dataset_attribute(key="editedby", value="bar") expected = [ - OrderedDict([('@name', 'title'), ('@type', 'String'), ('@value', 'Example Data')]), - OrderedDict([('@name', 'editedby'), ('@type', 'String'), ('@value', 'bar')]), + OrderedDict([("@name", "title"), ("@type", "String"), ("@value", "Example Data")]), + OrderedDict([("@name", "editedby"), ("@type", "String"), ("@value", "bar")]), ] - res = nc.ncroot['netcdf']['attribute'] + res = nc.ncroot["netcdf"]["attribute"] assert res == expected def test_remove_dataset_attribute(): nc = xncml.Dataset(input_file) - nc.add_dataset_attribute('bar', 'foo') - nc.remove_dataset_attribute('title') - nc.remove_dataset_attribute('title') - nc.remove_dataset_attribute('bar') - expected_removals = nc.ncroot['netcdf']['remove'] - expected_removals = [ - removal for removal in expected_removals if removal['@type'] == 'attribute' - ] + nc.add_dataset_attribute("bar", "foo") + nc.remove_dataset_attribute("title") + nc.remove_dataset_attribute("title") + nc.remove_dataset_attribute("bar") + expected_removals = nc.ncroot["netcdf"]["remove"] + expected_removals = [removal for removal in expected_removals if removal["@type"] == "attribute"] assert len(expected_removals) == 2 def test_rename_dataset_attribute(): nc = xncml.Dataset(input_file) # Rename existing attribute - nc.rename_dataset_attribute(old_name='title', new_name='Title') - assert nc.ncroot['netcdf']['attribute'][0]['@name'] == 'Title' + nc.rename_dataset_attribute(old_name="title", new_name="Title") + assert nc.ncroot["netcdf"]["attribute"][0]["@name"] == "Title" # Rename attribute not in the NcML (but possibly in the netcdf `location`) - nc.rename_dataset_attribute(old_name='foo', new_name='bar') - assert nc.ncroot['netcdf']['attribute'][1]['@name'] == 'bar' + nc.rename_dataset_attribute(old_name="foo", new_name="bar") + assert nc.ncroot["netcdf"]["attribute"][1]["@name"] == "bar" def test_remove_variable(): nc = xncml.Dataset(input_file) - nc.remove_variable('lon') - expected = [OrderedDict([('@name', 'lon'), ('@type', 'variable')])] - res = nc.ncroot['netcdf']['remove'] + nc.remove_variable("lon") + expected = [OrderedDict([("@name", "lon"), ("@type", "variable")])] + res = nc.ncroot["netcdf"]["remove"] assert expected == res def test_add_aggregation(): nc = xncml.Dataset(input_file) - nc.add_aggregation('new_dim', 'joinNew') - nc.add_variable_agg('new_dim', 'newVar') + nc.add_aggregation("new_dim", "joinNew") + nc.add_variable_agg("new_dim", "newVar") expected = [ OrderedDict( [ - ('@dimName', 'new_dim'), - ('@type', 'joinNew'), - ('variableAgg', [OrderedDict([('@name', 'newVar')])]), + ("@dimName", "new_dim"), + ("@type", "joinNew"), + ("variableAgg", [OrderedDict([("@name", "newVar")])]), ] ) ] - res = nc.ncroot['netcdf']['aggregation'] + res = nc.ncroot["netcdf"]["aggregation"] assert expected == res def test_add_scan(): nc = xncml.Dataset(input_file) - nc.add_aggregation('new_dim', 'joinExisting') - nc.add_scan('new_dim', location='foo', suffix='.nc') + nc.add_aggregation("new_dim", "joinExisting") + nc.add_scan("new_dim", location="foo", suffix=".nc") expected = [ OrderedDict( [ - ('@dimName', 'new_dim'), - ('@type', 'joinExisting'), + ("@dimName", "new_dim"), + ("@type", "joinExisting"), ( - 'scan', - [OrderedDict([('@location', 'foo'), ('@subdirs', 'true'), ('@suffix', '.nc')])], + "scan", + [OrderedDict([("@location", "foo"), ("@subdirs", "true"), ("@suffix", 
".nc")])], ), ] ) ] - res = nc.ncroot['netcdf']['aggregation'] + res = nc.ncroot["netcdf"]["aggregation"] assert expected == res def test_to_ncml(): nc = xncml.Dataset(input_file) - with tempfile.NamedTemporaryFile(suffix='.ncml') as t: + with tempfile.NamedTemporaryFile(suffix=".ncml") as t: nc.to_ncml(path=t.name) assert os.path.exists(t.name) @@ -325,21 +315,21 @@ def test_to_ncml(): def test_to_dict(): nc = xncml.Dataset(input_file) out = nc.to_cf_dict() - assert out['attributes']['title'] == 'Example Data' - assert out['variables']['rh']['attributes']['long_name'] == 'relative humidity' - assert out['variables']['rh']['type'] == 'int' - assert out['variables']['rh']['shape'] == ['time', 'lat', 'lon'] - assert out['dimensions']['time'] == 2 - assert 'groups' not in out + assert out["attributes"]["title"] == "Example Data" + assert out["variables"]["rh"]["attributes"]["long_name"] == "relative humidity" + assert out["variables"]["rh"]["type"] == "int" + assert out["variables"]["rh"]["shape"] == ["time", "lat", "lon"] + assert out["dimensions"]["time"] == 2 + assert "groups" not in out # Check coordinates are first - assert list(out['variables'].keys())[:3] == ['lat', 'lon', 'time'] + assert list(out["variables"].keys())[:3] == ["lat", "lon", "time"] - nc = xncml.Dataset(Path(here) / 'data' / 'aggNewCoord.ncml') + nc = xncml.Dataset(Path(here) / "data" / "aggNewCoord.ncml") out = nc.to_cf_dict() - assert out['variables']['time']['data'] == [0, 1, 2] + assert out["variables"]["time"]["data"] == [0, 1, 2] - nc = xncml.Dataset(Path(here) / 'data' / 'subsetCoordEdges.ncml') + nc = xncml.Dataset(Path(here) / "data" / "subsetCoordEdges.ncml") with pytest.raises(NotImplementedError): out = nc.to_cf_dict() diff --git a/tests/test_parser.py b/tests/test_parser.py index fea0cce..f9e56d9 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -13,12 +13,13 @@ # Would need to modify the XML files _live_ to reflect the actual path. -data = Path(__file__).parent / 'data' +data = Path(__file__).parent / "data" class CheckClose(object): """Check that files are closed after the test. Note that `close` has to be explicitly called within the - context manager for this to work.""" + context manager for this to work. 
+ """ def __init__(self): self.proc = psutil.Process() @@ -31,281 +32,281 @@ def __exit__(self, *args): """Raise error if files are left open at the end of the test.""" after = len(self.proc.open_files()) if after != self.before: - raise AssertionError(f'Files left open after test: {after - self.before}') + raise AssertionError(f"Files left open after test: {after - self.before}") def test_aggexisting(): with CheckClose(): - ds = xncml.open_ncml(data / 'aggExisting.xml') + ds = xncml.open_ncml(data / "aggExisting.xml") check_dimension(ds) check_coord_var(ds) check_agg_coord_var(ds) check_read_data(ds) - assert ds['time'].attrs['ncmlAdded'] == 'timeAtt' + assert ds["time"].attrs["ncmlAdded"] == "timeAtt" ds.close() def test_aggexisting_w_coords(): with CheckClose(): - ds = xncml.open_ncml(data / 'aggExistingWcoords.xml') + ds = xncml.open_ncml(data / "aggExistingWcoords.xml") check_dimension(ds) check_coord_var(ds) check_agg_coord_var(ds) check_read_data(ds) - assert ds['time'].attrs['ncmlAdded'] == 'timeAtt' + assert ds["time"].attrs["ncmlAdded"] == "timeAtt" ds.close() def test_aggexisting_coords_var(): - ds = xncml.open_ncml(data / 'aggExisting1.xml') + ds = xncml.open_ncml(data / "aggExisting1.xml") check_dimension(ds) check_coord_var(ds) check_agg_coord_var(ds) check_read_data(ds) - assert all(ds['time'].data == list(range(7, 125, 2))) + assert all(ds["time"].data == list(range(7, 125, 2))) def test_agg_new(): - ds = xncml.open_ncml(data / 'aggNew.ncml') + ds = xncml.open_ncml(data / "aggNew.ncml") assert len(ds.time) == 3 assert all(ds.time.data == [0, 10, 99]) - assert 'T' in ds.data_vars + assert "T" in ds.data_vars assert len(ds.lat) == 3 def test_agg_new_coord(): - ds = xncml.open_ncml(data / 'aggNewCoord.ncml') + ds = xncml.open_ncml(data / "aggNewCoord.ncml") assert ds.time.dtype == np.int32 assert len(ds.time) == 3 assert all(ds.time.data == [0, 1, 2]) - assert ds.time.attrs['units'] == 'months since 2000-6-16 6:00' - assert 'T' in ds.data_vars + assert ds.time.attrs["units"] == "months since 2000-6-16 6:00" + assert "T" in ds.data_vars assert len(ds.lat) == 3 def test_agg_existing2(): - ds = xncml.open_ncml(data / 'aggExisting2.xml') - assert ds['time'].attrs['units'] == 'hours since 2006-06-16 00:00' - assert ds['time'].dtype == float - assert all(ds['time'].data == [12.0, 13.0, 14.0]) + ds = xncml.open_ncml(data / "aggExisting2.xml") + assert ds["time"].attrs["units"] == "hours since 2006-06-16 00:00" + assert ds["time"].dtype == float + assert all(ds["time"].data == [12.0, 13.0, 14.0]) def test_agg_existing4(): - ds = xncml.open_ncml(data / 'aggExisting4.ncml') - assert all(ds['time'].data == [1.1496816e9, 1.1496852e9, 1.1496888e9]) + ds = xncml.open_ncml(data / "aggExisting4.ncml") + assert all(ds["time"].data == [1.1496816e9, 1.1496852e9, 1.1496888e9]) def test_agg_existing5(): - ds = xncml.open_ncml(data / 'aggExisting5.ncml') - assert ds['time'].dtype == np.int32 - assert all(ds['time'].data == list(range(59))) + ds = xncml.open_ncml(data / "aggExisting5.ncml") + assert ds["time"].dtype == np.int32 + assert all(ds["time"].data == list(range(59))) def test_agg_existing_add_coords(): # TODO: Complete test - ds = xncml.open_ncml(data / 'aggExistingAddCoord.ncml') - assert 'time' in ds.variables + ds = xncml.open_ncml(data / "aggExistingAddCoord.ncml") + assert "time" in ds.variables def test_modify_atts(): - ds = xncml.open_ncml(data / 'modifyAtts.xml') - assert ds.attrs['Conventions'] == 'Metapps' - assert 'title' not in ds.attrs - assert 'UNITS' in ds['rh'].attrs - 
assert 'units' not in ds['rh'].attrs - assert ds['rh'].attrs['longer_name'] == 'Abe said what?' - assert 'long_name' not in ds['rh'].attrs + ds = xncml.open_ncml(data / "modifyAtts.xml") + assert ds.attrs["Conventions"] == "Metapps" + assert "title" not in ds.attrs + assert "UNITS" in ds["rh"].attrs + assert "units" not in ds["rh"].attrs + assert ds["rh"].attrs["longer_name"] == "Abe said what?" + assert "long_name" not in ds["rh"].attrs def test_modify_vars(): - ds = xncml.open_ncml(data / 'modifyVars.xml') - assert ds.attrs['Conventions'] == 'added' - assert ds.attrs['title'] == 'replaced' + ds = xncml.open_ncml(data / "modifyVars.xml") + assert ds.attrs["Conventions"] == "added" + assert ds.attrs["title"] == "replaced" - assert 'deltaLat' in ds.data_vars - assert all(ds['deltaLat'].data == [0.1, 0.1, 0.01]) - assert ds['deltaLat'].dtype == float + assert "deltaLat" in ds.data_vars + assert all(ds["deltaLat"].data == [0.1, 0.1, 0.01]) + assert ds["deltaLat"].dtype == float - assert 'Temperature' in ds.data_vars - assert 'T' not in ds.data_vars + assert "Temperature" in ds.data_vars + assert "T" not in ds.data_vars - assert 'ReletiveHumidity' in ds.data_vars - assert 'rh' not in ds.data_vars - rh = ds['ReletiveHumidity'] - assert rh.attrs['long_name2'] == 'relatively humid' - assert rh.attrs['units'] == 'percent (%)' - assert 'long_name' not in rh.attrs + assert "ReletiveHumidity" in ds.data_vars + assert "rh" not in ds.data_vars + rh = ds["ReletiveHumidity"] + assert rh.attrs["long_name2"] == "relatively humid" + assert rh.attrs["units"] == "percent (%)" + assert "long_name" not in rh.attrs def test_agg_syn_grid(): - ds = xncml.open_ncml(data / 'aggSynGrid.xml') + ds = xncml.open_ncml(data / "aggSynGrid.xml") assert len(ds.lat) == 3 assert len(ds.lon) == 4 assert len(ds.time) == 3 - assert all(ds.time == ['2005-11-22 22:19:53Z', '2005-11-22 23:19:53Z', '2005-11-23 00:19:59Z']) + assert all(ds.time == ["2005-11-22 22:19:53Z", "2005-11-22 23:19:53Z", "2005-11-23 00:19:59Z"]) def test_agg_syn_no_coord(): - ds = xncml.open_ncml(data / 'aggSynNoCoord.xml') + ds = xncml.open_ncml(data / "aggSynNoCoord.xml") assert len(ds.lat) == 3 assert len(ds.lon) == 4 assert len(ds.time) == 3 def test_agg_syn_no_coords_dir(): - ds = xncml.open_ncml(data / 'aggSynNoCoordsDir.xml') + ds = xncml.open_ncml(data / "aggSynNoCoordsDir.xml") assert len(ds.lat) == 3 assert len(ds.lon) == 4 assert len(ds.time) == 3 def test_agg_synthetic(): - ds = xncml.open_ncml(data / 'aggSynthetic.xml') + ds = xncml.open_ncml(data / "aggSynthetic.xml") assert len(ds.time) == 3 assert all(ds.time == [0, 10, 99]) def test_agg_synthetic_2(): - ds = xncml.open_ncml(data / 'aggSynthetic2.xml') + ds = xncml.open_ncml(data / "aggSynthetic2.xml") assert len(ds.time) == 3 assert all(ds.time == [0, 1, 2]) def test_agg_synthetic_3(): - ds = xncml.open_ncml(data / 'aggSynthetic3.xml') + ds = xncml.open_ncml(data / "aggSynthetic3.xml") assert len(ds.time) == 3 assert all(ds.time == [0, 10, 99]) def test_agg_syn_scan(): with CheckClose(): - ds = xncml.open_ncml(data / 'aggSynScan.xml') + ds = xncml.open_ncml(data / "aggSynScan.xml") assert len(ds.time) == 3 assert all(ds.time == [0, 10, 20]) ds.close() def test_agg_syn_rename(): - ds = xncml.open_ncml(data / 'aggSynRename.xml') + ds = xncml.open_ncml(data / "aggSynRename.xml") assert len(ds.time) == 3 - assert 'T' not in ds - assert 'Temperature' in ds + assert "T" not in ds + assert "Temperature" in ds def test_rename_var(): - ds = xncml.open_ncml(data / 'renameVar.xml') - assert 
ds.attrs['title'] == 'Example Data' + ds = xncml.open_ncml(data / "renameVar.xml") + assert ds.attrs["title"] == "Example Data" - assert 'ReletiveHumidity' in ds + assert "ReletiveHumidity" in ds assert all(ds.lat.data == [41.0, 40.0, 39.0]) assert all(ds.lon.data == [-109.0, -107.0, -105.0, -103.0]) assert ds.lon.dtype == np.float32 assert all(ds.time.data == [6, 18, 24, 36]) assert ds.time.dtype == np.int32 - assert all(np.equal(ds.attrs['testFloat'], [1.0, 2.0, 3.0, 4.0])) + assert all(np.equal(ds.attrs["testFloat"], [1.0, 2.0, 3.0, 4.0])) - assert ds.attrs['testByte'][0].dtype == np.int8 - assert ds.attrs['testShort'][0].dtype == np.int16 - assert ds.attrs['testInt'][0].dtype == np.int32 - assert ds.attrs['testFloat'][0].dtype == np.float32 - assert ds.attrs['testDouble'][0].dtype == np.float64 + assert ds.attrs["testByte"][0].dtype == np.int8 + assert ds.attrs["testShort"][0].dtype == np.int16 + assert ds.attrs["testInt"][0].dtype == np.int32 + assert ds.attrs["testFloat"][0].dtype == np.float32 + assert ds.attrs["testDouble"][0].dtype == np.float64 def test_agg_union_simple(): - ds = xncml.open_ncml(data / 'aggUnionSimple.xml') - assert ds.attrs['title'] == 'Union cldc and lflx' + ds = xncml.open_ncml(data / "aggUnionSimple.xml") + assert ds.attrs["title"] == "Union cldc and lflx" assert len(ds.lat) == 21 - assert ds.lat.attrs['units'] == 'degrees_north' + assert ds.lat.attrs["units"] == "degrees_north" assert all(ds.lat.data[:3] == [10, 9, 8]) assert len(ds.time) == 456 - assert 'lflx' in ds - assert 'cldc' in ds + assert "lflx" in ds + assert "cldc" in ds assert ds.lflx.shape == (456, 21, 360) def test_agg_union(): - ds = xncml.open_ncml(data / 'aggUnion.xml') - assert ds.attrs['title'] == 'Example Data' + ds = xncml.open_ncml(data / "aggUnion.xml") + assert ds.attrs["title"] == "Example Data" assert ds.lat.size == 3 assert ds.time.size == 2 assert ds.ReletiveHumidity.shape == (2, 3, 4) - assert ds.ReletiveHumidity.attrs['units'] == 'percent' + assert ds.ReletiveHumidity.attrs["units"] == "percent" assert ds.Temperature.shape == (2, 3, 4) - assert ds.Temperature.attrs['units'] == 'degC' + assert ds.Temperature.attrs["units"] == "degC" def test_agg_union_rename(): - ds = xncml.open_ncml(data / 'aggUnionRename.xml') - assert 'LavaFlow' in ds.variables + ds = xncml.open_ncml(data / "aggUnionRename.xml") + assert "LavaFlow" in ds.variables def test_agg_union_scan(): - ds = xncml.open_ncml(data / 'aggUnionScan.xml') - assert 'lflx' in ds - assert 'cldc' in ds + ds = xncml.open_ncml(data / "aggUnionScan.xml") + assert "lflx" in ds + assert "cldc" in ds def test_read(): - ds = xncml.open_ncml(data / 'testRead.xml') - assert ds.attrs['title'] == 'Example Data' - assert ds.attrs['testFloat'] == (1.0, 2.0, 3.0, 4.0) + ds = xncml.open_ncml(data / "testRead.xml") + assert ds.attrs["title"] == "Example Data" + assert ds.attrs["testFloat"] == (1.0, 2.0, 3.0, 4.0) def test_read_override(): - ds = xncml.open_ncml(data / 'testReadOverride.xml') - assert 'rh' not in ds.variables + ds = xncml.open_ncml(data / "testReadOverride.xml") + assert "rh" not in ds.variables -@pytest.mark.skip(reason='unclear if this is meant to fail') +@pytest.mark.skip(reason="unclear if this is meant to fail") def test_read_https(): - ds = xncml.open_ncml(data / 'testReadHttps.xml') - assert ds.attrs['title'] == 'Example Data' + ds = xncml.open_ncml(data / "testReadHttps.xml") + assert ds.attrs["title"] == "Example Data" def test_agg_existing_inequivalent_cals(): - ds = xncml.open_ncml(data / 
'agg_with_calendar/aggExistingInequivalentCals.xml')
+    ds = xncml.open_ncml(data / "agg_with_calendar/aggExistingInequivalentCals.xml")
     assert ds.time.size == 725
     assert ds.time[-1] == dt.datetime(2018, 12, 31)
 
 
-@pytest.mark.skip(reason='dateFormatMark not implemented')
+@pytest.mark.skip(reason="dateFormatMark not implemented")
 def test_aggexistingone():
-    ds = xncml.open_ncml(data / 'aggExistingOne.ncml')
+    ds = xncml.open_ncml(data / "aggExistingOne.ncml")
     assert len(ds.time) == 3
 
 
-@pytest.mark.skip(reason='dateFormatMark not implemented')
-@pytest.mark.skip(reason='<promoteGlobalAttribute> not implemented')
+@pytest.mark.skip(reason="dateFormatMark not implemented")
+@pytest.mark.skip(reason="<promoteGlobalAttribute> not implemented")
 def test_agg_existing_promote():
-    ds = xncml.open_ncml(data / 'aggExistingPromote.ncml')
-    assert 'times' in ds.variables
+    ds = xncml.open_ncml(data / "aggExistingPromote.ncml")
+    assert "times" in ds.variables
 
 
-@pytest.mark.skip(reason='<promoteGlobalAttribute> not implemented')
+@pytest.mark.skip(reason="<promoteGlobalAttribute> not implemented")
 def test_agg_existing_promote2():
-    _ = xncml.open_ncml(data / 'aggExistingPromote2.ncml')
+    _ = xncml.open_ncml(data / "aggExistingPromote2.ncml")
 
 
 def test_agg_join_new_scalar_coord():
-    _ = xncml.open_ncml(data / 'aggJoinNewScalarCoord.xml')
+    _ = xncml.open_ncml(data / "aggJoinNewScalarCoord.xml")
     # TODO: Complete test
 
 
 def test_exercise_1():
-    _ = xncml.open_ncml(data / 'exercise1.ncml')
+    _ = xncml.open_ncml(data / "exercise1.ncml")
     # TODO: Complete test
 
 
 def test_read_meta_data():
-    ds = xncml.open_ncml(data / 'readMetadata.xml')
-    assert ds.attrs['title'] == 'Example Data'
-    assert ds.variables['T'].attrs['units'] == 'degC'
+    ds = xncml.open_ncml(data / "readMetadata.xml")
+    assert ds.attrs["title"] == "Example Data"
+    assert ds.variables["T"].attrs["units"] == "degC"
 
 
 def test_unsigned_type():
-    ds = xncml.open_ncml(data / 'testUnsignedType.xml')
-    assert ds['be_or_not_to_be'].dtype == np.uintc
+    ds = xncml.open_ncml(data / "testUnsignedType.xml")
+    assert ds["be_or_not_to_be"].dtype == np.uintc
 
 
 def test_empty_scalar__no_values_tag():
@@ -313,123 +314,123 @@ def test_empty_scalar__no_values_tag():
     A scalar variable which is missing <values> will have its value set
     to the default value of its type.
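+    For example, a float64 scalar defaults to 0.0.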
""" - ds = xncml.open_ncml(data / 'testEmptyScalar.xml') - assert ds['empty_scalar_var'].dtype == np.dtype('float64') - assert ds['empty_scalar_var'].item() == 0 + ds = xncml.open_ncml(data / "testEmptyScalar.xml") + assert ds["empty_scalar_var"].dtype == np.dtype("float64") + assert ds["empty_scalar_var"].item() == 0 def test_empty_scalar__with_empty_values_tag(): """A scalar with an empty tag is invalid.""" - with pytest.raises(ValueError, match='No values found for variable .*'): - xncml.open_ncml(data / 'testEmptyScalar_withValuesTag.xml') + with pytest.raises(ValueError, match="No values found for variable .*"): + xncml.open_ncml(data / "testEmptyScalar_withValuesTag.xml") def test_multiple_values_for_scalar(): """A scalar with multiple values in its tag is invalid.""" - with pytest.raises(ValueError, match='The expected size for variable .* was 1, .*'): - xncml.open_ncml(data / 'testEmptyScalar_withMultipleValues.xml') + with pytest.raises(ValueError, match="The expected size for variable .* was 1, .*"): + xncml.open_ncml(data / "testEmptyScalar_withMultipleValues.xml") def test_read_enum(): """A enum should be turned into CF flag_values and flag_meanings attributes.""" - ds = xncml.open_ncml(data / 'testEnums.xml') - assert ds.be_or_not_to_be.dtype.metadata['enum'] == {'false': 0, 'true': 1} - assert ds.be_or_not_to_be.dtype.metadata['enum_name'] == 'boolean' + ds = xncml.open_ncml(data / "testEnums.xml") + assert ds.be_or_not_to_be.dtype.metadata["enum"] == {"false": 0, "true": 1} + assert ds.be_or_not_to_be.dtype.metadata["enum_name"] == "boolean" def test_empty_attr(): """A empty attribute is valid.""" - ds = xncml.open_ncml(data / 'testEmptyAttr.xml') - assert ds.attrs['comment'] == '' + ds = xncml.open_ncml(data / "testEmptyAttr.xml") + assert ds.attrs["comment"] == "" def test_read_group__read_only_root_group(): """By default, only read root group.""" - ds = xncml.open_ncml(data / 'testGroup.xml') + ds = xncml.open_ncml(data / "testGroup.xml") assert ds.toto is not None - assert ds.get('group_var') is None - assert ds.get('other_group_var') is None + assert ds.get("group_var") is None + assert ds.get("other_group_var") is None def test_read_group__read_sub_group(): """Read specified sub group and its parents.""" - ds = xncml.open_ncml(data / 'testGroup.xml', group='a_sub_group') + ds = xncml.open_ncml(data / "testGroup.xml", group="a_sub_group") assert ds.toto is not None - assert ds.get('group_var') is not None - ds.group_var.attrs['group_path'] = '/a_sub_group' - assert ds.get('other_group_var') is None + assert ds.get("group_var") is not None + ds.group_var.attrs["group_path"] = "/a_sub_group" + assert ds.get("other_group_var") is None def test_read_group__conflicting_dims(): """Read a group and ensure its dimension is correct""" - ds = xncml.open_ncml(data / 'testGroupConflictingDims.xml', group='gr_b') - assert ds.dims['index'] == 94 - assert 'index' in ds.gr_b_var.dims + ds = xncml.open_ncml(data / "testGroupConflictingDims.xml", group="gr_b") + assert ds.dims["index"] == 94 + assert "index" in ds.gr_b_var.dims def test_read__invalid_dim(): with pytest.raises(ValueError, match="Unknown dimension 'myDim'.*"): - xncml.open_ncml(data / 'testGroupInvalidDim.xml') + xncml.open_ncml(data / "testGroupInvalidDim.xml") def test_flatten_groups(): """Read every group and flatten everything in a single dataset/group.""" - ds = xncml.open_ncml(data / 'testGroup.xml', group='*') + ds = xncml.open_ncml(data / "testGroup.xml", group="*") assert ds.toto is not None - assert 
ds.get('toto__1') is None - assert ds.get('group_var') is not None - ds.group_var.attrs['group_path'] = '/a_sub_group' - assert ds.get('other_group_var') is not None - ds.other_group_var.attrs['group_path'] = '/another_sub_group' + assert ds.get("toto__1") is None + assert ds.get("group_var") is not None + ds.group_var.attrs["group_path"] = "/a_sub_group" + assert ds.get("other_group_var") is not None + ds.other_group_var.attrs["group_path"] = "/another_sub_group" def test_flatten_groups__conflicting_dims(): """Read every group and rename dimensions""" - ds = xncml.open_ncml(data / 'testGroupConflictingDims.xml', group='*') - assert 'index' in ds.gr_a_var.dims - assert ds.dims['index'] is not None - assert 'index__1' in ds.gr_b_var.dims - assert ds.dims['index__1'] is not None + ds = xncml.open_ncml(data / "testGroupConflictingDims.xml", group="*") + assert "index" in ds.gr_a_var.dims + assert ds.dims["index"] is not None + assert "index__1" in ds.gr_b_var.dims + assert ds.dims["index__1"] is not None def test_flatten_groups__sub_groups(): """Read every group and rename dimensions""" - ds = xncml.open_ncml(data / 'testGroupMultiLayers.xml', group='*') - assert ds.dims['index'] == 42 - assert ds.dims['index__1'] == 22 - assert ds['a_var'].size == 1 - assert ds['a_var'] == 2 - assert ds['a_var__1'].size == 42 - assert ds['a_var__2'].size == 22 + ds = xncml.open_ncml(data / "testGroupMultiLayers.xml", group="*") + assert ds.dims["index"] == 42 + assert ds.dims["index__1"] == 22 + assert ds["a_var"].size == 1 + assert ds["a_var"] == 2 + assert ds["a_var__1"].size == 42 + assert ds["a_var__2"].size == 22 # --- # def check_dimension(ds): - assert len(ds['lat']) == 3 - assert len(ds['lon']) == 4 - assert len(ds['time']) == 59 + assert len(ds["lat"]) == 3 + assert len(ds["lon"]) == 4 + assert len(ds["time"]) == 59 def check_coord_var(ds): - lat = ds['lat'] + lat = ds["lat"] assert len(lat) == 3 assert lat.dtype == np.float32 - assert lat.dims == ('lat',) - assert lat.attrs['units'] == 'degrees_north' + assert lat.dims == ("lat",) + assert lat.attrs["units"] == "degrees_north" assert all(lat.data == [41.0, 40.0, 39.0]) def check_agg_coord_var(ds): - time = ds['time'] - assert time.dims == ('time',) + time = ds["time"] + assert time.dims == ("time",) assert len(time) == 59 assert time.dtype == np.int32 def check_read_data(ds): - t = ds['T'] - assert t.dims == ('time', 'lat', 'lon') + t = ds["T"] + assert t.dims == ("time", "lat", "lon") assert t.size == 708 assert t.shape == (59, 3, 4) assert t.dtype == float - assert 'T' in ds.data_vars + assert "T" in ds.data_vars diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..8b4f9be --- /dev/null +++ b/tox.ini @@ -0,0 +1,52 @@ +[tox] +min_version = 4.0 +envlist = + lint + py{38,39,310,311,312} + docs + coveralls +requires = + flit + pip >= 23.3.0 +opts = + --verbose + +[testenv:lint] +skip_install = True +deps = + flake8 + ruff >=0.2.0 +commands = + make lint +allowlist_externals = + make + +[testenv:docs] +extras = + docs +commands = + make --directory=docs clean html +allowlist_externals = + make + +[testenv] +setenv = + PYTEST_ADDOPTS = "--color=yes" + PYTHONPATH = {toxinidir} +passenv = + GITHUB_* +extras = + dev +download = True +install_command = python -m pip install --no-user {opts} {packages} +deps = +; If you want to make tox run the tests with the same versions, create a +; requirements.txt with the pinned versions and uncomment the following line: +; -r{toxinidir}/requirements.txt +commands_pre = + pip list + pip check +commands = 
+ pytest --cov + # Coveralls requires access to a repo token set in .coveralls.yml in order to report stats + coveralls: - coveralls diff --git a/xncml/__init__.py b/xncml/__init__.py deleted file mode 100644 index e002a33..0000000 --- a/xncml/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -"""Top-level module for xncml.""" -from pkg_resources import DistributionNotFound, get_distribution - -from .core import Dataset -from .parser import open_ncml - -try: - __version__ = get_distribution(__name__).version -except DistributionNotFound: - # package is not installed - pass -finally: - del get_distribution, DistributionNotFound From 616a5898ac29512ba57585410f3d7cf3e172aa05 Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Mon, 15 Apr 2024 17:27:21 +0200 Subject: [PATCH 05/26] maint: migrate .github from cookiecutter --- .github/ISSUE_TEMPLATE.md | 20 +--- .../0001-GENERIC-ISSUE-TEMPLATE.yml | 34 ++++++ .github/ISSUE_TEMPLATE/0002-BUG-REPORT.yml | 44 ++++++++ .../ISSUE_TEMPLATE/0003-FEATURE-REQUEST.yml | 31 ++++++ .../ISSUE_TEMPLATE/0004-QUESTION-SUPPORT.yml | 23 ++++ .github/ISSUE_TEMPLATE/config.yml | 1 + .github/PULL_REQUEST_TEMPLATE.md | 19 +++- .github/dependabot.yml | 15 +++ .github/workflows/bump-version.yml | 86 +++++++++++++++ .github/workflows/cache-cleaner.yml | 49 +++++++++ .github/workflows/dependency-review.yml | 31 ++++++ .github/workflows/first-pull-request.yml | 54 ++++++++++ .github/workflows/main.yml | 102 +++++++++++------- .github/workflows/publish-pypi.yml | 42 ++++++++ .github/workflows/python-publish.yml | 42 -------- .github/workflows/scorecard.yml | 82 ++++++++++++++ .github/workflows/tag-testpypi.yml | 69 ++++++++++++ .github/workflows/workflow-warning.yml | 69 ++++++++++++ 18 files changed, 712 insertions(+), 101 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/0001-GENERIC-ISSUE-TEMPLATE.yml create mode 100644 .github/ISSUE_TEMPLATE/0002-BUG-REPORT.yml create mode 100644 .github/ISSUE_TEMPLATE/0003-FEATURE-REQUEST.yml create mode 100644 .github/ISSUE_TEMPLATE/0004-QUESTION-SUPPORT.yml create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/bump-version.yml create mode 100644 .github/workflows/cache-cleaner.yml create mode 100644 .github/workflows/dependency-review.yml create mode 100644 .github/workflows/first-pull-request.yml create mode 100644 .github/workflows/publish-pypi.yml delete mode 100644 .github/workflows/python-publish.yml create mode 100644 .github/workflows/scorecard.yml create mode 100644 .github/workflows/tag-testpypi.yml create mode 100644 .github/workflows/workflow-warning.yml diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 299ecf3..9ae9d64 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,25 +1,11 @@ -Thanks for submitting an issue! - -Here's a quick checklist in what to include: - -- [ ] Include a detailed description of the bug or suggestion -- [ ] `conda list` of the conda environment you are using -- [ ] Minimal, self-contained copy-pastable example that generates the issue if possible. Please be concise with code posted. See guidelines below on how to provide a good bug report: - - - [Minimal Complete Verifiable Examples](https://stackoverflow.com/help/mcve) - - [Craft Minimal Bug Reports](http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) - - Bug reports that follow these guidelines are easier to diagnose, - and so are often handled much more quickly. 
- - +* xncml version: +* Python version: +* Operating System: ### Description -``` Describe what you were trying to get done. Tell us what happened, what went wrong, and what you expected to happen. -``` ### What I Did diff --git a/.github/ISSUE_TEMPLATE/0001-GENERIC-ISSUE-TEMPLATE.yml b/.github/ISSUE_TEMPLATE/0001-GENERIC-ISSUE-TEMPLATE.yml new file mode 100644 index 0000000..256a447 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/0001-GENERIC-ISSUE-TEMPLATE.yml @@ -0,0 +1,34 @@ +name: Generic issue template +description: For detailing generic/uncategorized issues in xncml + +body: + - type: textarea + id: generic-issue + attributes: + label: Generic Issue + description: Please fill in the following information fields as needed. + value: | + * xncml version: + * Python version: + * Operating System: + + ### Description + + + ### What I Did + + ``` + $ pip install foo --bar + ``` + + ### What I Received + + ``` + Traceback (most recent call last): + File "/path/to/file/script.py", line 3326, in run_code + exec(code_obj, self.user_global_ns, self.user_ns) + File "", line 1, in + 1/0 + ZeroDivisionError: division by zero diff --git a/.github/ISSUE_TEMPLATE/0002-BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/0002-BUG-REPORT.yml new file mode 100644 index 0000000..2c9e808 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/0002-BUG-REPORT.yml @@ -0,0 +1,44 @@ +name: Bug report +description: Help us improve xncml +labels: [ "bug" ] + +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + - type: textarea + id: setup-information + attributes: + label: Setup Information + description: | + What software versions are you running? Example: + - xncml version: 0.55.0-gamma + - Python version: 4.2 + - Operating System: Nutmeg Linux 12.34 | macOS 11.0 "Redmond" + value: | + - xncml version: + - Python version: + - Operating System: + - type: textarea + id: description + attributes: + label: Description + description: Describe what you were trying to get done. Tell us what happened, what went wrong, and what you expected to happen. + - type: textarea + id: steps-to-reproduce + attributes: + label: Steps To Reproduce + description: Paste the command(s) you ran and the output. If there was a crash, please include the traceback below. + - type: textarea + id: additional-context + attributes: + label: Additional context + description: Add any other context about the problem here. + - type: checkboxes + id: submit-pr + attributes: + label: Contribution + description: Do you intend to submit a fix for this bug? (The xncml developers will help with code compliance) + options: + - label: I would be willing/able to open a Pull Request to address this bug. diff --git a/.github/ISSUE_TEMPLATE/0003-FEATURE-REQUEST.yml b/.github/ISSUE_TEMPLATE/0003-FEATURE-REQUEST.yml new file mode 100644 index 0000000..5f680b4 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/0003-FEATURE-REQUEST.yml @@ -0,0 +1,31 @@ +name: Feature request +description: Suggest an idea for xncml +labels: [ "enhancement" ] + +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this feature request! + - type: textarea + id: problem + attributes: + label: Addressing a Problem? + description: Is your feature request related to a problem? Please describe it. + - type: textarea + id: potential-solution + attributes: + label: Potential Solution + description: Describe the solution you'd like to see implemented. 
+  - type: textarea
+    id: additional-context
+    attributes:
+      label: Additional context
+      description: Add any other context about the feature request here.
+  - type: checkboxes
+    id: submit-pr
+    attributes:
+      label: Contribution
+      description: Do you intend to submit a Pull Request for this feature? (The xncml developers will help with code compliance)
+      options:
+        - label: I would be willing/able to open a Pull Request to contribute this feature.
diff --git a/.github/ISSUE_TEMPLATE/0004-QUESTION-SUPPORT.yml b/.github/ISSUE_TEMPLATE/0004-QUESTION-SUPPORT.yml
new file mode 100644
index 0000000..a04894e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/0004-QUESTION-SUPPORT.yml
@@ -0,0 +1,23 @@
+name: Question/Support
+description: Ask for help from the developers
+labels: [ "support" ]
+
+body:
+  - type: textarea
+    id: setup-information
+    attributes:
+      label: Setup Information
+      description: |
+        What software versions are you running? Example:
+        - xncml version: 0.55.0-gamma
+        - Python version: 4.2
+        - Operating System: Nutmeg Linux 12.34 | macOS 11.0 "Redmond"
+      value: |
+        - xncml version:
+        - Python version:
+        - Operating System:
+  - type: textarea
+    id: description
+    attributes:
+      label: Context
+      description: Describe what you were trying to get done. Tell us what happened, what went wrong, and what you expected to happen.
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..0086358
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: true
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 3580015..dc5f20e 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,9 +1,18 @@
-Thanks for submitting a PR, your contribution is really appreciated!
+
+
+### Pull Request Checklist:
+- [ ] This PR addresses an already opened issue (for bug fixes / features)
+    - This PR fixes #xyz
+- [ ] (If applicable) Documentation has been added / updated (for bug fixes / features).
+- [ ] (If applicable) Tests have been added.
+- [ ] CHANGES.rst has been updated (with summary of main changes).
+    - [ ] Link to issue (:issue:`number`) and pull request (:pull:`number`) has been added.
 
-Here's a quick checklist that should be present in PRs (you can delete this text from the final description, this is just a guideline):
+### What kind of change does this PR introduce?
+* ...
 
-- [ ] Include documentation when adding new features.
-- [ ] Include new tests or update existing tests when applicable.
+### Does this PR introduce a breaking change?
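+<!-- If yes, please describe the impact and the migration path for existing users -->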
-[summarize your pull request here] + +### Other information: diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..a9f2875 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,15 @@ +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: daily + time: '12:00' + open-pull-requests-limit: 10 + + - package-ecosystem: pip + directory: / + schedule: + interval: daily + time: '12:00' + open-pull-requests-limit: 10 diff --git a/.github/workflows/bump-version.yml b/.github/workflows/bump-version.yml new file mode 100644 index 0000000..9f57c0f --- /dev/null +++ b/.github/workflows/bump-version.yml @@ -0,0 +1,86 @@ +# This workflow requires a personal access token named `BUMP_VERSION_TOKEN` with the following privileges: +# - Contents: Read and Write +# - Metadata: Read-Only +# - Pull Requests: Read and Write + +name: "Bump Patch Version" + +on: + push: + branches: + - main + paths-ignore: + - .cruft.json + - .editorconfig + - .github/**.yml + - .gitignore + - .pre-commit-config.yaml + - .yamllint.yaml + - .zenodo.json + - AUTHORS.rst + - CHANGES.rst + - CONTRIBUTING.rst + - Makefile + - .readthedocs.yml + - docs/*.py + - docs/*.rst + - environment-docs.yml + - pyproject.toml + - tests/**.py + - tox.ini + - src/xncml/__init__.py + workflow_dispatch: + +permissions: + contents: read + +jobs: + bump_patch_version: + runs-on: ubuntu-latest + permissions: + actions: read + contents: write + steps: + - name: Harden Runner + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + files.pythonhosted.org:443 + github.com:443 + pypi.org:443 + - uses: actions/checkout@v4 + with: + persist-credentials: false + - uses: actions/setup-python@v4 + with: + python-version: "3.x" + - name: Config Commit Bot + run: | + git config --local user.email "bumpversion[bot]@ouranos.ca" + git config --local user.name "bumpversion[bot]" + - name: Install bump-my-version + run: | + python -m pip install "bump-my-version>=0.18.3" + - name: Current Version + run: | + bump-my-version show current_version + CURRENT_VERSION="$(grep -E '__version__' src/xncml/__init__.py | cut -d ' ' -f3)" + echo "CURRENT_VERSION=${CURRENT_VERSION}" >> $GITHUB_ENV + - name: Conditional Bump Version + run: | + if [[ ${{ env.CURRENT_VERSION }} =~ -dev(\.\d+)? 
]]; then + echo "Development version (ends in 'dev(\.\d+)?'), bumping 'build' version" + bump-my-version bump build + else + echo "Version is stable, bumping 'patch' version" + bump-my-version bump patch + fi + bump-my-version show-bump + - name: Push Changes + uses: ad-m/github-push-action@master + with: + force: false + github_token: ${{ secrets.BUMP_VERSION_TOKEN }} + branch: ${{ github.ref }} diff --git a/.github/workflows/cache-cleaner.yml b/.github/workflows/cache-cleaner.yml new file mode 100644 index 0000000..d48a7f6 --- /dev/null +++ b/.github/workflows/cache-cleaner.yml @@ -0,0 +1,49 @@ +# Example taken from https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#managing-caches +name: Cleanup Caches on Pull Request Merge +on: + pull_request: + types: + - closed + +permissions: # added using https://github.com/step-security/secure-repo + contents: read + +jobs: + cleanup: + name: Cleanup + runs-on: ubuntu-latest + permissions: + actions: write + steps: + - name: Harden Runner + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + api.github.com:443 + github.com:443 + objects.githubusercontent.com:443 + + - uses: actions/checkout@v4.1.1 + + - name: Cleanup + run: | + gh extension install actions/gh-actions-cache + + REPO=${{ github.repository }} + BRANCH="refs/pull/${{ github.event.pull_request.number }}/merge" + + echo "Fetching list of cache key" + cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH -L 100 | cut -f 1 ) + + ## Setting this to not fail the workflow while deleting cache keys. + set +e + echo "Deleting caches..." + for cacheKey in $cacheKeysForPR + do + gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm + done + echo "Done" + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml new file mode 100644 index 0000000..c977388 --- /dev/null +++ b/.github/workflows/dependency-review.yml @@ -0,0 +1,31 @@ +# Dependency Review Action +# +# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging. 
+# +# Source repository: https://github.com/actions/dependency-review-action +# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement +name: 'Dependency Review' +on: + pull_request: + +permissions: + contents: read + +jobs: + dependency-review: + runs-on: ubuntu-latest + steps: + - name: Harden Runner + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + api.github.com:443 + github.com:443 + + - name: 'Checkout Repository' + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: 'Dependency Review' + uses: actions/dependency-review-action@4901385134134e04cec5fbe5ddfe3b2c5bd5d976 diff --git a/.github/workflows/first-pull-request.yml b/.github/workflows/first-pull-request.yml new file mode 100644 index 0000000..dfaca22 --- /dev/null +++ b/.github/workflows/first-pull-request.yml @@ -0,0 +1,54 @@ +name: First Pull Request + +on: + pull_request_target: + types: + - opened + +jobs: + welcome: + name: Welcome + runs-on: ubuntu-latest + steps: + - name: Harden Runner + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + api.github.com:443 + + - uses: actions/github-script@v6 + with: + script: | + // Get a list of all issues created by the PR opener + // See: https://octokit.github.io/rest.js/#pagination + const creator = context.payload.sender.login + const opts = github.rest.issues.listForRepo.endpoint.merge({ + ...context.issue, + creator, + state: 'all' + }) + const issues = await github.paginate(opts) + + for (const issue of issues) { + if (issue.number === context.issue.number) { + continue + } + + if (issue.pull_request) { + return // Creator is already a contributor. + } + } + + await github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `**Welcome**, new contributor! + + It appears that this is your first Pull Request. To give credit where it's due, we ask that you add your information to the \`AUTHORS.rst\` and \`.zenodo.json\`: + - [ ] The relevant author information has been added to \`AUTHORS.rst\` and \`.zenodo.json\`. + + Please make sure you've read our [contributing guide](CONTRIBUTING.rst). We look forward to reviewing your Pull Request shortly ✨` + }) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 367000b..d7df56c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,61 +1,89 @@ -# This workflow will install Python dependencies, run tests and lint with a single version of Python -# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python - -name: Testing Suite +name: xncml Testing Suite on: push: branches: - main + paths-ignore: + - .cruft.json + - CHANGES.rst + - README.rst + - pyproject.toml + - tests/test_xncml.py + - src/xncml/__init__.py pull_request: - branches: - - main + +concurrency: + # For a given workflow, if we push to the same branch, cancel all previous builds on that branch except on master. 
+ group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} permissions: contents: read jobs: - build: - name: Test (Python ${{ matrix.python-version }}) + lint: + name: Lint (Python${{ matrix.python-version }}) + runs-on: ubuntu-latest + strategy: + matrix: + python-version: + - "3.x" + steps: + - uses: actions/checkout@v4 + - name: Set up Python${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install tox + run: | + python -m pip install tox + - name: Run linting suite + run: | + python -m tox -e lint + + test-pypi: + name: Test with Python${{ matrix.python-version }} (Python${{ matrix.python-version }} + tox) + needs: lint runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] + include: + - tox-env: "py38" + python-version: "3.8" + - tox-env: "py39" + python-version: "3.9" + - tox-env: "py310" + python-version: "3.10" + - tox-env: "py311" + python-version: "3.11" + - tox-env: "py312" + python-version: "3.12" steps: - - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Upgrade pip and install coveralls - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade coveralls - - name: Install Package (editable) - run: | - python -m pip install -e ".[dev]" - - name: Check versions - run: | - python -m pip list - python -m pip check - - name: Test with pytest - run: | - pytest - coveralls - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - COVERALLS_FLAG_NAME: run-{{ matrix.python-version }} - COVERALLS_PARALLEL: true - COVERALLS_SERVICE_NAME: github + - uses: actions/checkout@v4 + - name: Set up Python${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install tox + run: | + python -m pip install tox + - name: Test with tox + run: | + python -m tox -e ${{ matrix.tox-env }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + COVERALLS_FLAG_NAME: run-${{ matrix.tox-env }} + COVERALLS_PARALLEL: true + COVERALLS_SERVICE_NAME: github finish: - name: Coveralls Report needs: - - build + - test-pypi runs-on: ubuntu-latest container: python:3-slim steps: - - name: Report to Coveralls + - name: Coveralls Finished run: | python -m pip install --upgrade coveralls python -m coveralls --finish diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml new file mode 100644 index 0000000..f91b018 --- /dev/null +++ b/.github/workflows/publish-pypi.yml @@ -0,0 +1,42 @@ +name: Publish Python 🐍 distributions 📦 to PyPI + +on: + release: + types: + - published + +permissions: + contents: read + +jobs: + build-n-publish-pypi: + name: Build and publish Python 🐍 distributions 📦 to PyPI + runs-on: ubuntu-latest + environment: production + permissions: + # IMPORTANT: this permission is mandatory for trusted publishing + id-token: write + steps: + - name: Harden Runner + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + files.pythonhosted.org:443 + github.com:443 + pypi.org:443 + upload.pypi.org:443 + - uses: actions/checkout@v4 + - name: Set up Python3 + uses: actions/setup-python@v4 + with: + python-version: "3.x" + - name: Install packaging libraries + run: | + python -m pip install flit + - name: Build 
a binary wheel and a source tarball + run: | + python -m flit build + - name: Publish distribution 📦 to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml deleted file mode 100644 index cb417c9..0000000 --- a/.github/workflows/python-publish.yml +++ /dev/null @@ -1,42 +0,0 @@ -# This workflow will upload a Python Package using Twine when a release is created -# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries - -# This workflow uses actions that are not certified by GitHub. -# They are provided by a third-party and are governed by -# separate terms of service, privacy policy, and support -# documentation. - -name: Upload Python Package - -on: - release: - types: - - published - -permissions: - contents: read - -jobs: - deploy: - name: Publish (PyPI) - runs-on: ubuntu-latest - # We should be making use of deployment environments for this job - # https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v3 - with: - python-version: '3.x' - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install build - - name: Build package - run: | - python -m build - - name: Publish package - uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 0000000..cb4f4ba --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,82 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '41 8 * * 4' + push: + branches: + - master + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). 
+ id-token: write + steps: + - name: Harden Runner + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + api.github.com:443 + api.osv.dev:443 + api.securityscorecards.dev:443 + fulcio.sigstore.dev:443 + github.com:443 + oss-fuzz-build-logs.storage.googleapis.com:443 + rekor.sigstore.dev:443 + tuf-repo-cdn.sigstore.dev:443 + www.bestpractices.dev:443 + + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 + with: + persist-credentials: false + + - name: Run analysis + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 + with: + results_file: results.sarif + results_format: sarif + # This job step requires a personal access token named `OPENSSF_SCORECARD_TOKEN` with the following privileges: + # - Administration: Read-Only + # - Metadata: Read-Only + # - Webhooks: Read-Only + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + repo_token: ${{ secrets.OPENSSF_SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: Upload artifact + uses: actions/upload-artifact@694cdabd8bdb0f10b2cea11669e1bf5453eed0a6 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. + - name: Upload to code-scanning + uses: github/codeql-action/upload-sarif@e5f05b81d5b6ff8cfa111c80c22c5fd02a384118 # 3.23.0 + with: + sarif_file: results.sarif diff --git a/.github/workflows/tag-testpypi.yml b/.github/workflows/tag-testpypi.yml new file mode 100644 index 0000000..13487a3 --- /dev/null +++ b/.github/workflows/tag-testpypi.yml @@ -0,0 +1,69 @@ +name: Publish Python 🐍 distributions 📦 to TestPyPI + +on: + push: + tags: + - 'v*.*' # Push events to matching v*, i.e. 
v1.0, v20.15.10 + +permissions: + contents: read + +jobs: + release: + name: Create Release from tag + runs-on: ubuntu-latest + if: startsWith(github.ref, 'refs/tags/v') && endsWith(github.ref, '.0') + permissions: + contents: write + steps: + - name: Harden Runner + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + with: + egress-policy: audit + - name: Checkout code + uses: actions/checkout@v4 + - name: Create Release + uses: softprops/action-gh-release@v1 + env: + # This token is provided by Actions, you do not need to create your own token + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ github.ref_name }} + name: Release ${{ github.ref_name }} + draft: true + prerelease: false + + deploy-testpypi: + name: Build and publish Python 🐍 distributions 📦 to TestPyPI + runs-on: ubuntu-latest + environment: staging + permissions: + # IMPORTANT: this permission is mandatory for trusted publishing + id-token: write + steps: + - name: Harden Runner + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + files.pythonhosted.org:443 + github.com:443 + pypi.org:443 + test.pypi.org:443 + - uses: actions/checkout@v4 + - name: Set up Python3 + uses: actions/setup-python@v4 + with: + python-version: "3.x" + - name: Install packaging libraries + run: | + python -m pip install flit + - name: Build a binary wheel and a source tarball + run: | + python -m flit build + - name: Publish distribution 📦 to Test PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + repository-url: https://test.pypi.org/legacy/ + skip-existing: true diff --git a/.github/workflows/workflow-warning.yml b/.github/workflows/workflow-warning.yml new file mode 100644 index 0000000..433881b --- /dev/null +++ b/.github/workflows/workflow-warning.yml @@ -0,0 +1,69 @@ +name: Workflow Changes Warnings + +on: + # Note: potential security risk from this action using pull_request_target. + # Do not add actions in here which need a checkout of the repo, and do not use any caching in here. + # See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target + pull_request_target: + types: + - opened + - reopened + - synchronize + paths: + - .github/workflows/*.yml + +permissions: + contents: read + +jobs: + comment-concerning-workflow-changes: + name: Comment Concerning Workflow Changes + runs-on: ubuntu-latest + if: | + (github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name) + permissions: + contents: read + pull-requests: write + steps: + - name: Harden Runner + uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + api.github.com:443 + - name: Find comment + uses: peter-evans/find-comment@a54c31d7fa095754bfef525c0c8e5e5674c4b4b1 # v2.4.0 + id: fc + with: + issue-number: ${{ github.event.pull_request.number }} + comment-author: 'github-actions[bot]' + body-includes: | + This Pull Request modifies GitHub workflows and is coming from a fork. 
+ - name: Create comment + if: | + (steps.fc.outputs.comment-id == '') && + (!contains(github.event.pull_request.labels.*.name, 'approved')) && + (github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name) + uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0 + with: + comment-id: ${{ steps.fc.outputs.comment-id }} + issue-number: ${{ github.event.pull_request.number }} + body: | + > **Warning** + > This Pull Request modifies GitHub Workflows and is coming from a fork. + **It is very important for the reviewer to ensure that the workflow changes are appropriate.** + edit-mode: replace + - name: Update comment + if: | + contains(github.event.pull_request.labels.*.name, 'approved') + uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0 + with: + comment-id: ${{ steps.fc.outputs.comment-id }} + issue-number: ${{ github.event.pull_request.number }} + body: | + > **Note** + > Changes have been approved by a maintainer. + reactions: | + hooray + edit-mode: append From c50ba40decc732b2f0643af092afc818f3c39223 Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Mon, 15 Apr 2024 17:28:18 +0200 Subject: [PATCH 06/26] maint: Apply code review suggestions Co-authored-by: Francis Charette-Migneault --- .zenodo.json | 16 ++++++++ CHANGELOG.rst | 13 ++++++ CONTRIBUTING.rst | 72 ++++++++++++++++----------------- Makefile | 12 +++--- README.rst | 17 ++++---- docs/apidoc/modules.rst | 7 ++++ docs/apidoc/xncml.generated.rst | 20 +++++++++ docs/apidoc/xncml.rst | 37 +++++++++++++++++ pyproject.toml | 13 +++--- 9 files changed, 151 insertions(+), 56 deletions(-) create mode 100644 .zenodo.json create mode 100644 docs/apidoc/modules.rst create mode 100644 docs/apidoc/xncml.generated.rst create mode 100644 docs/apidoc/xncml.rst diff --git a/.zenodo.json b/.zenodo.json new file mode 100644 index 0000000..a1b2924 --- /dev/null +++ b/.zenodo.json @@ -0,0 +1,16 @@ +{ + "title": "xncml", + "creators": [ + { + "name": "Anderson Banihirwe" + } + ], + "keywords": [ + "xncml" + ], + "license": "Apache-2.0", + "language": "eng", + "communities": [], + "upload_type": "software", + "access_right": "open" +} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 5b6ce5e..5715fa6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,8 @@ Changelog ========= +.. _changes-0.5.0: + 0.5.0 (unreleased) ------------------ @@ -9,11 +11,14 @@ Changelog Breaking changes ^^^^^^^^^^^^^^^^ + - Nested group handling: Before this version, all groups were read, but conflicting variable names in-between groups would shadow data. Now, similarly to xarray ``open_dataset``, ``open_ncml`` accepts an optional ``group`` argument to specify which group should be read. When ``group`` is not specified, it defaults to the root group. Additionally ``group`` can be set to ``'*'`` so that every group is read and the hierarchy is flattened. In the event of conflicting variable/dimension names across groups, the conflicting name will be modified by appending ``'__n'`` where n is incremented. - Enums are no longer transformed into CF flag_values and flag_meanings attributes, instead they are stored in the ``encoding["dtype"].metadata`` of their respective variable. This is aligned with what is done on xarray v2024.01.0 - [fix] scalar attributes that are not strings are no longer wrapped in tuples of length 1. +.. _changes-0.4.0: + 0.4.0 (2024-01-08) ------------------ @@ -22,6 +27,8 @@ Breaking changes - Add support for scalar variables. 
By @Bzah - [fix] empty attributes are now parsed into an empty string instead of crashing the parser. By @Bzah +.. _changes-0.3.1: + 0.3.1 (2023-11-10) ------------------ @@ -29,6 +36,8 @@ Breaking changes - Drop support for Python 3.8 +.. _changes-0.3: + 0.3 (2023-08-28) ---------------- @@ -38,6 +47,8 @@ Breaking changes - Add `Dataset.from_text` classmethod to create a `Dataset` from an XML string. By @huard +.. _changes-0.2: + 0.2 (2023-02-23) ---------------- @@ -47,6 +58,8 @@ Breaking changes - Implement `Dataset.to_cf_dict` method to export CF-JSON dictionary. By @huard. +.. _changes-0.1: + 0.1 Initial release (2022-11-24) -------------------------------- diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index b976dfe..f40a1e0 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -61,46 +61,46 @@ Ready to contribute? Here's how to set up ``xncml`` for local development. #. Fork the ``xncml`` repo on GitHub. #. Clone your fork locally:: - $ git clone git@github.com:your_name_here/xncml.git + git clone git@github.com:your_name_here/xncml.git #. Install your local copy into a development environment. Using ``virtualenv`` (``virtualenvwrapper``), you can create a new development environment with:: - $ python -m pip install flit virtualenvwrapper - $ mkvirtualenv xncml - $ cd xncml/ - $ flit install --symlink + python -m pip install flit virtualenvwrapper + mkvirtualenv xncml + cd xncml/ + flit install --symlink This installs ``xncml`` in an "editable" state, meaning that changes to the code are immediately seen by the environment. #. To ensure a consistent coding style, install the ``pre-commit`` hooks to your local clone:: - $ pre-commit install + pre-commit install On commit, ``pre-commit`` will check that ``flake8``, and ``ruff`` checks are passing, perform automatic fixes if possible, and warn of violations that require intervention. If your commit fails the checks initially, simply fix the errors, re-add the files, and re-commit. You can also run the hooks manually with:: - $ pre-commit run -a + pre-commit run -a If you want to skip the ``pre-commit`` hooks temporarily, you can pass the ``--no-verify`` flag to `$ git commit`. #. Create a branch for local development:: - $ git checkout -b name-of-your-bugfix-or-feature + git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. #. When you're done making changes, we **strongly** suggest running the tests in your environment or with the help of ``tox``:: - $ python -m pytest + python -m pytest # Or, to run multiple build tests - $ tox + tox #. Commit your changes and push your branch to GitHub:: - $ git add . - $ git commit -m "Your detailed description of your changes." - $ git push origin name-of-your-bugfix-or-feature + git add . + git commit -m "Your detailed description of your changes." + git push origin name-of-your-bugfix-or-feature If ``pre-commit`` hooks fail, try re-committing your changes (or, if need be, you can skip them with `$ git commit --no-verify`). @@ -109,12 +109,12 @@ Ready to contribute? Here's how to set up ``xncml`` for local development. #. When pushing your changes to your branch on GitHub, the documentation will automatically be tested to reflect the changes in your Pull Request. This build process can take several minutes at times. 
If you are actively making changes that affect the documentation and wish to save time, you can compile and test your changes beforehand locally with:: # To generate the html and open it in your browser - $ make docs + make docs # To only generate the html - $ make autodoc - $ make -C docs html + make autodoc + make -C docs html # To simply test that the docs pass build checks - $ tox -e docs + tox -e docs #. Once your Pull Request has been accepted and merged to the ``main`` branch, several automated workflows will be triggered: @@ -144,11 +144,11 @@ $ pytest tests.test_xncml To run specific code style checks:: - $ black --check xncml tests - $ isort --check xncml tests - $ blackdoc --check xncml docs - $ ruff xncml tests - $ flake8 xncml tests + black --check xncml tests + isort --check xncml tests + blackdoc --check xncml docs + ruff xncml tests + flake8 xncml tests To get ``black``, ``isort``, ``blackdoc``, ``ruff``, and ``flake8`` (with plugins ``flake8-alphabetize`` and ``flake8-rst-docstrings``) simply install them with `pip` into your environment. @@ -159,20 +159,20 @@ A reminder for the **maintainers** on how to deploy. This section is only releva .. warning:: - It is important to be aware that any changes to files found within the ``xncml`` folder (with the exception of ``xncml/__init__.py``) will trigger the ``bump-version.yml`` workflow. Be careful not to commit changes to files in this folder when preparing a new release. + It is important to be aware that any changes to files found within the ``xncml`` folder (with the exception of ``src/xncml/__init__.py``) will trigger the ``bump-version.yml`` workflow. Be careful not to commit changes to files in this folder when preparing a new release. #. Create a new branch from `main` (e.g. `release-0.2.0`). #. Update the `CHANGES.rst` file to change the `Unreleased` section to the current date. #. Bump the version in your branch to the next version (e.g. `v0.1.0 -> v0.2.0`):: - $ bump-my-version bump minor # In most cases, we will be releasing a minor version - $ git push + bump-my-version bump minor # In most cases, we will be releasing a minor version + git push #. Create a pull request from your branch to `main`. #. Once the pull request is merged, create a new release on GitHub. On the main branch, run:: - $ git tag v0.2.0 - $ git push --tags + git tag v0.2.0 + git push --tags This will trigger a GitHub workflow to build the package and upload it to TestPyPI. At the same time, the GitHub workflow will create a draft release on GitHub. Assuming that the workflow passes, the final release can then be published on GitHub by finalizing the draft release. @@ -192,15 +192,15 @@ The simple approach The simplest approach to packaging for general support (pip wheels) requires that ``flit`` be installed:: - $ python -m pip install flit + python -m pip install flit From the command line on your Linux distribution, simply run the following from the clone's main dev branch:: # To build the packages (sources and wheel) - $ python -m flit build + python -m flit build # To upload to PyPI - $ python -m flit publish dist/* + python -m flit publish dist/* The new version based off of the version checked out will now be available via `pip` (`$ pip install xncml`). 
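The conditional bump in the ``bump-version.yml`` workflow introduced earlier in this series hinges on whether the current version string is a development version. A minimal Python sketch of that decision follows — the ``-dev(\.\d+)?`` pattern is taken from the workflow itself, while the function name and sample versions are invented for illustration:

.. code-block:: python

    import re

    def bump_kind(current_version: str) -> str:
        """Mirror the workflow's check: dev versions bump 'build', stable ones 'patch'."""
        # "0.5.0-dev.3" is a development version -> bump only the 'build' part.
        # "0.5.0" is a stable version -> bump the 'patch' part.
        if re.search(r"-dev(\.\d+)?", current_version):
            return "build"
        return "patch"

    assert bump_kind("0.5.0-dev.3") == "build"
    assert bump_kind("0.5.0") == "patch"

Presumably, bumping only the ``build`` part on development versions keeps routine merges to ``main`` from consuming patch numbers between actual releases.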
@@ -216,8 +216,8 @@ Before preparing an initial release on conda-forge, we *strongly* suggest consul In order to create a new conda build recipe, to be used when proposing packages to the conda-forge repository, we strongly suggest using the ``grayskull`` tool:: - $ python -m pip install grayskull - $ grayskull pypi xncml + python -m pip install grayskull + grayskull pypi xncml For more information on ``grayskull``, please see the following link: https://github.com/conda/grayskull @@ -242,17 +242,17 @@ docker images (at time of writing, we endorse using `manylinux_2_24_x86_64`). With `docker` installed and running, begin by pulling the image:: - $ sudo docker pull quay.io/pypa/manylinux_2_24_x86_64 + sudo docker pull quay.io/pypa/manylinux_2_24_x86_64 From the xncml source folder we can enter into the docker container, providing access to the `xncml` source files by linking them to the running image:: - $ sudo docker run --rm -ti -v $(pwd):/xncml -w /xncml quay.io/pypa/manylinux_2_24_x86_64 bash + sudo docker run --rm -ti -v $(pwd):/xncml -w /xncml quay.io/pypa/manylinux_2_24_x86_64 bash Finally, to build the wheel, we run it against the provided Python3.9 binary:: - $ /opt/python/cp39-cp39m/bin/python -m build --sdist --wheel + /opt/python/cp39-cp39m/bin/python -m build --sdist --wheel This will then place two files in `xncml/dist/` ("xncml-1.2.3-py3-none-any.whl" and "xncml-1.2.3.tar.gz"). We can now leave our docker container (`$ exit`) and continue with uploading the files to PyPI:: - $ twine upload dist/* + twine upload dist/* diff --git a/Makefile b/Makefile index 56d3f2e..b4649b6 100644 --- a/Makefile +++ b/Makefile @@ -53,8 +53,8 @@ clean-test: ## remove test and coverage artifacts rm -fr .pytest_cache lint/flake8: ## check style with flake8 - ruff xncml tests - flake8 --config=.flake8 xncml tests + ruff src/xncml tests + flake8 --config=.flake8 src/xncml tests lint: lint/flake8 ## check style @@ -65,13 +65,13 @@ test-all: ## run tests on every Python version with tox tox coverage: ## check code coverage quickly with the default Python - coverage run --source xncml -m pytest + coverage run --source src/xncml -m pytest coverage report -m coverage html $(BROWSER) htmlcov/index.html autodoc: clean-docs ## create sphinx-apidoc files: - sphinx-apidoc -o docs/apidoc --private --module-first xncml + sphinx-apidoc -o docs/apidoc --private --module-first src/xncml linkcheck: autodoc ## run checks over all external links found throughout the documentation $(MAKE) -C docs linkcheck @@ -93,7 +93,7 @@ release: dist ## package and upload a release python -m flit publish dist/* install: clean ## install the package to the active Python's site-packages - python -m flit install + python -m pip install .[all] dev: clean ## install the package to the active Python's site-packages - python -m flit install --symlink + python -m pip install --editable .[all] diff --git a/README.rst b/README.rst index 1a4d843..5195d10 100644 --- a/README.rst +++ b/README.rst @@ -13,10 +13,11 @@ These tools allow you to modify NcML by: and read NcML files into `xarray.Dataset` objects: -```python -import xncml -ds = xncml.open_ncml("large_ensemble.ncml") -``` +.. code-block:: python + + import xncml + ds = xncml.open_ncml("large_ensemble.ncml") + See `doc`_ for more information. @@ -31,7 +32,7 @@ To install xncml, run this command in your terminal: .. 
code-block:: console - $ python -m pip install xncml + python -m pip install xncml This is the preferred method to install xncml, as it will always install the most recent stable release. @@ -51,19 +52,19 @@ You can either clone the public repository: .. code-block:: console - $ git clone git@github.com:xarray-contrib/xncml/ + git clone git@github.com:xarray-contrib/xncml/ Or download the `tarball`_: .. code-block:: console - $ curl -OJL https://github.com/xarray-contrib/xncml/tarball/main + curl -OJL https://github.com/xarray-contrib/xncml/tarball/main Once you have a copy of the source, you can install it with: .. code-block:: console - $ python -m pip install . + python -m pip install . .. _doc: https://readthedocs.org/projects/xncml .. _Github repo: https://github.com/xarray-contrib/xncml/ diff --git a/docs/apidoc/modules.rst b/docs/apidoc/modules.rst new file mode 100644 index 0000000..665661a --- /dev/null +++ b/docs/apidoc/modules.rst @@ -0,0 +1,7 @@ +xncml +===== + +.. toctree:: + :maxdepth: 4 + + xncml diff --git a/docs/apidoc/xncml.generated.rst b/docs/apidoc/xncml.generated.rst new file mode 100644 index 0000000..5a6498a --- /dev/null +++ b/docs/apidoc/xncml.generated.rst @@ -0,0 +1,20 @@ +xncml.generated package +======================= + +.. automodule:: xncml.generated + :members: + :undoc-members: + :show-inheritance: + :private-members: + +Submodules +---------- + +xncml.generated.ncml\_2\_2 module +--------------------------------- + +.. automodule:: xncml.generated.ncml_2_2 + :members: + :undoc-members: + :show-inheritance: + :private-members: diff --git a/docs/apidoc/xncml.rst b/docs/apidoc/xncml.rst new file mode 100644 index 0000000..dd48df9 --- /dev/null +++ b/docs/apidoc/xncml.rst @@ -0,0 +1,37 @@ +xncml package +============= + +.. automodule:: xncml + :members: + :undoc-members: + :show-inheritance: + :private-members: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + xncml.generated + +Submodules +---------- + +xncml.core module +----------------- + +.. automodule:: xncml.core + :members: + :undoc-members: + :show-inheritance: + :private-members: + +xncml.parser module +------------------- + +.. 
automodule:: xncml.parser + :members: + :undoc-members: + :show-inheritance: + :private-members: diff --git a/pyproject.toml b/pyproject.toml index 172b7ed..cb01c17 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,8 +9,7 @@ authors = [ ] maintainers = [ {name = "Anderson Banihirwe", email = "abanihi@ucar.edu"}, - {name = "David Huard"}, - {name = "Abel Aoun"} + {name = "David Huard"} ] description = "Tools for manipulating and opening NCML (NetCDF Markup) files with/for xarray" readme = {file = "README.rst", content-type = "text/x-rst"} @@ -71,8 +70,10 @@ docs = [ "pandoc", "ipython", "ipykernel", - "jupyter_client" + "jupyter_client", + "numpydoc" ] +all = ["xncml[dev]", "xncml[docs]"] [project.urls] "Source" = "https://github.com/xarray-contrib/xncml" @@ -93,7 +94,7 @@ serialize = [ ] [[tool.bumpversion.files]] -filename = "xncml/__init__.py" +filename = "src/xncml/__init__.py" search = "__version__ = \"{current_version}\"" replace = "__version__ = \"{new_version}\"" @@ -119,7 +120,7 @@ values = [ [tool.coverage.run] relative_files = true -include = ["xncml/*"] +include = ["src/xncml/*"] omit = ["tests/*.py"] [tool.flit.sdist] @@ -212,7 +213,7 @@ no-lines-before = ["future", "standard-library"] max-complexity = 15 [tool.ruff.lint.per-file-ignores] -"xncml/**/__init__.py" = ["F401", "F403"] +"src/xncml/**/__init__.py" = ["F401", "F403"] [tool.ruff.lint.pycodestyle] max-doc-length = 180 From 26ce02a2a588e44f653cb3e0b546ef45adaffd94 Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Mon, 15 Apr 2024 17:43:32 +0200 Subject: [PATCH 07/26] enh: Add bump-my-version template for CHANGELOG Co-authored-by: Francis Charette-Migneault --- CHANGELOG.rst | 6 ++---- pyproject.toml | 24 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 5715fa6..92d3ba8 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,10 +1,8 @@ Changelog ========= -.. _changes-0.5.0: - -0.5.0 (unreleased) ------------------- +`Unreleased `_ (latest) +============================================================================ - Added support for running `pytest` with `pytest-cov`. By @Zeitsperre - Reworked the GitHub CI testing workflow to perform version checks as well as tests with `pytest-cov` . By @Zeitsperre diff --git a/pyproject.toml b/pyproject.toml index cb01c17..14b8470 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -108,6 +108,30 @@ filename = ".cruft.json" search = "\"version\": \"{current_version}\"" replace = "\"version\": \"{new_version}\"" +[[tool.bumpversion.files]] +filename = "CHANGELOG.rst" +search = """\ +`Unreleased `_ (latest) +============================================================================ +""" +replace = """\ +`Unreleased `_ (latest) +============================================================================ + +Changes: +-------- +- No change. + +Fixes: +------ +- No change. + +.. 
_changes_{new_version}: + +`{new_version} `_ ({now:%%Y-%%m-%%d}) + ======================================================================== +""" + [tool.bumpversion.parts.build] independent = false From 45a44766a5c53f9a0094c33c42ce638a39ab7c7a Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Fri, 3 May 2024 14:44:01 +0200 Subject: [PATCH 08/26] fix: lint issues --- .flake8 | 1 + .pre-commit-config.yaml | 5 ----- CONTRIBUTING.rst | 38 ++++---------------------------------- pyproject.toml | 7 +++++-- src/xncml/__init__.py | 4 ++-- src/xncml/core.py | 23 +++++++++++++++-------- src/xncml/parser.py | 11 +---------- 7 files changed, 28 insertions(+), 61 deletions(-) diff --git a/.flake8 b/.flake8 index 1116575..79b2dbd 100644 --- a/.flake8 +++ b/.flake8 @@ -9,6 +9,7 @@ ignore = AZ100, AZ200, AZ300, + AZ400 C, D, E, diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 46baad3..e8db23a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,7 +11,6 @@ repos: - id: check-json - id: check-yaml - id: check-toml - - id: double-quote-string-fixer - repo: https://github.com/pappasam/toml-sort rev: v0.23.1 hooks: @@ -30,10 +29,6 @@ repos: - id: ruff args: [ --fix ] - id: ruff-format - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.7 - hooks: - - id: ruff - repo: https://github.com/pycqa/flake8 rev: 7.0.0 hooks: diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index f40a1e0..3a924a3 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,6 +1,3 @@ -.. highlight:: shell - -============ Contributing ============ @@ -82,7 +79,7 @@ Ready to contribute? Here's how to set up ``xncml`` for local development. pre-commit run -a - If you want to skip the ``pre-commit`` hooks temporarily, you can pass the ``--no-verify`` flag to `$ git commit`. + If you want to skip the ``pre-commit`` hooks temporarily, you can pass the ``--no-verify`` flag to `git commit`. #. Create a branch for local development:: @@ -102,7 +99,7 @@ Ready to contribute? Here's how to set up ``xncml`` for local development. git commit -m "Your detailed description of your changes." git push origin name-of-your-bugfix-or-feature - If ``pre-commit`` hooks fail, try re-committing your changes (or, if need be, you can skip them with `$ git commit --no-verify`). + If ``pre-commit`` hooks fail, try re-committing your changes (or, if need be, you can skip them with `git commit --no-verify`). #. Submit a `Pull Request `_ through the GitHub website. @@ -140,7 +137,7 @@ Tips To run a subset of tests:: -$ pytest tests.test_xncml +pytest tests.test_xncml To run specific code style checks:: @@ -202,7 +199,7 @@ From the command line on your Linux distribution, simply run the following from # To upload to PyPI python -m flit publish dist/* -The new version based off of the version checked out will now be available via `pip` (`$ pip install xncml`). +The new version based off of the version checked out will now be available via `pip` (`pip install xncml`). Releasing on conda-forge ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -229,30 +226,3 @@ Subsequent releases ^^^^^^^^^^^^^^^^^^^ If the conda-forge feedstock recipe is built from PyPI, then when a new release is published on PyPI, `regro-cf-autotick-bot` will open Pull Requests automatically on the conda-forge feedstock. It is up to the conda-forge feedstock maintainers to verify that the package is building properly before merging the Pull Request to the main branch. 
- -Building sources for wide support with `manylinux` image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. warning:: - This section is for building source files that link to or provide links to C/C++ dependencies. - It is not necessary to perform the following when building pure Python packages. - -In order to do ensure best compatibility across architectures, we suggest building wheels using the `PyPA`'s `manylinux` -docker images (at time of writing, we endorse using `manylinux_2_24_x86_64`). - -With `docker` installed and running, begin by pulling the image:: - - sudo docker pull quay.io/pypa/manylinux_2_24_x86_64 - -From the xncml source folder we can enter into the docker container, providing access to the `xncml` source files by linking them to the running image:: - - sudo docker run --rm -ti -v $(pwd):/xncml -w /xncml quay.io/pypa/manylinux_2_24_x86_64 bash - -Finally, to build the wheel, we run it against the provided Python3.9 binary:: - - /opt/python/cp39-cp39m/bin/python -m build --sdist --wheel - -This will then place two files in `xncml/dist/` ("xncml-1.2.3-py3-none-any.whl" and "xncml-1.2.3.tar.gz"). -We can now leave our docker container (`$ exit`) and continue with uploading the files to PyPI:: - - twine upload dist/* diff --git a/pyproject.toml b/pyproject.toml index 14b8470..3077f54 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -128,8 +128,8 @@ Fixes: .. _changes_{new_version}: -`{new_version} `_ ({now:%%Y-%%m-%%d}) - ======================================================================== +`{new_version} dict: }, ) - def __repr__(self): + def __repr__(self) -> str: + """Return a string representation of the parsed xml""" return xmltodict.unparse(self.ncroot, pretty=True) # Aggregations and scans @@ -70,12 +77,12 @@ def add_aggregation(self, dim_name: str, type_: str, recheck_every: str = None, ---------- dim_name : str Dimension name. - type_ : str + ``type_`` : str Aggregation type. recheck_every : str - Time interval for rechecking the aggregation. Only used if `type_` is `AggregationType.scan`. + Time interval for rechecking the aggregation. Only used if ``type_`` is ``AggregationType.scan``. time_units_change : bool - Whether the time units change. Only used if `type_` is `AggregationType.scan`. + Whether the time units change. Only used if ``type_`` is ``AggregationType.scan``. """ at = AggregationType(type_) item = OrderedDict( @@ -189,7 +196,7 @@ def add_variable_attribute(self, variable, key, value, type_="String"): Attribute name value : object Attribute value. Must be a serializable Python Object - type_ : str, default: 'String' + ``type_`` : str, default: 'String' String describing attribute type. """ @@ -329,7 +336,7 @@ def add_dataset_attribute(self, key, value, type_="String"): Attribute name. value : object Attribute value. Must be a serializable Python Object. - type_ : str, default: 'String' + ``type_`` : str, default: 'String' String describing attribute type. """ diff --git a/src/xncml/parser.py b/src/xncml/parser.py index 632bc4f..1a0b660 100644 --- a/src/xncml/parser.py +++ b/src/xncml/parser.py @@ -438,16 +438,7 @@ def read_coord_value(nc: Netcdf, agg: Aggregation, dtypes: list = ()): def read_enum(obj: EnumTypedef) -> dict[str, list]: - """ - Parse element. - - Example - ------- - - false - true - undefined - + """Parse element. 
Parameters ---------- From 286ac0693dbb8a2e1d34e0fe5c8b55bddc0217ca Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Tue, 7 May 2024 17:30:59 +0200 Subject: [PATCH 09/26] fix: remove tox 38 xncml no longer support python 3.8 --- .github/workflows/main.yml | 2 -- tox.ini | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d7df56c..d4d341f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -49,8 +49,6 @@ jobs: strategy: matrix: include: - - tox-env: "py38" - python-version: "3.8" - tox-env: "py39" python-version: "3.9" - tox-env: "py310" diff --git a/tox.ini b/tox.ini index 8b4f9be..a69f3c6 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,7 @@ min_version = 4.0 envlist = lint - py{38,39,310,311,312} + py{39,310,311,312} docs coveralls requires = From 131fcb659eec543f8d3af467ab522058890d4be3 Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Mon, 13 May 2024 09:43:33 +0200 Subject: [PATCH 10/26] maint: Remove bump templating of CHANGELOG --- CHANGELOG.rst | 2 +- pyproject.toml | 24 ------------------------ 2 files changed, 1 insertion(+), 25 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 92d3ba8..ce1ea27 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,7 +2,7 @@ Changelog ========= `Unreleased `_ (latest) -============================================================================ +---------------------------------------------------------------------------- - Added support for running `pytest` with `pytest-cov`. By @Zeitsperre - Reworked the GitHub CI testing workflow to perform version checks as well as tests with `pytest-cov` . By @Zeitsperre diff --git a/pyproject.toml b/pyproject.toml index 3077f54..6214d2d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -108,30 +108,6 @@ filename = ".cruft.json" search = "\"version\": \"{current_version}\"" replace = "\"version\": \"{new_version}\"" -[[tool.bumpversion.files]] -filename = "CHANGELOG.rst" -search = """\ -`Unreleased `_ (latest) -============================================================================ -""" -replace = """\ -`Unreleased `_ (latest) -============================================================================ - -Changes: --------- -- No change. - -Fixes: ------- -- No change. - -.. _changes_{new_version}: - -`{new_version} Date: Mon, 13 May 2024 10:42:22 +0200 Subject: [PATCH 11/26] maint: Create releasing.rst Moved the 'how to release' from the contributing guide to a dedicated releasing.rst guide. --- CONTRIBUTING.rst | 78 ---------------------------- docs/releasing.rst | 127 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 78 deletions(-) create mode 100644 docs/releasing.rst diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 3a924a3..ba0f0d6 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -148,81 +148,3 @@ To run specific code style checks:: flake8 xncml tests To get ``black``, ``isort``, ``blackdoc``, ``ruff``, and ``flake8`` (with plugins ``flake8-alphabetize`` and ``flake8-rst-docstrings``) simply install them with `pip` into your environment. - -Versioning/Tagging ------------------- - -A reminder for the **maintainers** on how to deploy. This section is only relevant when producing a new point release for the package. - -.. warning:: - - It is important to be aware that any changes to files found within the ``xncml`` folder (with the exception of ``src/xncml/__init__.py``) will trigger the ``bump-version.yml`` workflow. 
Be careful not to commit changes to files in this folder when preparing a new release. - -#. Create a new branch from `main` (e.g. `release-0.2.0`). -#. Update the `CHANGES.rst` file to change the `Unreleased` section to the current date. -#. Bump the version in your branch to the next version (e.g. `v0.1.0 -> v0.2.0`):: - - bump-my-version bump minor # In most cases, we will be releasing a minor version - git push - -#. Create a pull request from your branch to `main`. -#. Once the pull request is merged, create a new release on GitHub. On the main branch, run:: - - git tag v0.2.0 - git push --tags - - This will trigger a GitHub workflow to build the package and upload it to TestPyPI. At the same time, the GitHub workflow will create a draft release on GitHub. Assuming that the workflow passes, the final release can then be published on GitHub by finalizing the draft release. - -#. Once the release is published, the `publish-pypi.yml` workflow will go into an `awaiting approval` mode on Github Actions. Only authorized users may approve this workflow (notifications will be sent) to trigger the upload to PyPI. - -.. warning:: - - Uploads to PyPI can **never** be overwritten. If you make a mistake, you will need to bump the version and re-release the package. If the package uploaded to PyPI is broken, you should modify the GitHub release to mark the package as broken, as well as yank the package (mark the version "broken") on PyPI. - -Packaging ---------- - -When a new version has been minted (features have been successfully integrated test coverage and stability is adequate), maintainers should update the pip-installable package (wheel and source release) on PyPI as well as the binary on conda-forge. - -The simple approach -~~~~~~~~~~~~~~~~~~~ - -The simplest approach to packaging for general support (pip wheels) requires that ``flit`` be installed:: - - python -m pip install flit - -From the command line on your Linux distribution, simply run the following from the clone's main dev branch:: - - # To build the packages (sources and wheel) - python -m flit build - - # To upload to PyPI - python -m flit publish dist/* - -The new version based off of the version checked out will now be available via `pip` (`pip install xncml`). - -Releasing on conda-forge -~~~~~~~~~~~~~~~~~~~~~~~~ - -Initial Release -^^^^^^^^^^^^^^^ - -Before preparing an initial release on conda-forge, we *strongly* suggest consulting the following links: - * https://conda-forge.org/docs/maintainer/adding_pkgs.html - * https://github.com/conda-forge/staged-recipes - -In order to create a new conda build recipe, to be used when proposing packages to the conda-forge repository, we strongly suggest using the ``grayskull`` tool:: - - python -m pip install grayskull - grayskull pypi xncml - -For more information on ``grayskull``, please see the following link: https://github.com/conda/grayskull - -Before updating the main conda-forge recipe, we echo the conda-forge documentation and *strongly* suggest performing the following checks: - * Ensure that dependencies and dependency versions correspond with those of the tagged version, with open or pinned versions for the `host` requirements. - * If possible, configure tests within the conda-forge build CI (e.g. `imports: xncml`, `commands: pytest xncml`). 
- -Subsequent releases -^^^^^^^^^^^^^^^^^^^ - -If the conda-forge feedstock recipe is built from PyPI, then when a new release is published on PyPI, `regro-cf-autotick-bot` will open Pull Requests automatically on the conda-forge feedstock. It is up to the conda-forge feedstock maintainers to verify that the package is building properly before merging the Pull Request to the main branch. diff --git a/docs/releasing.rst b/docs/releasing.rst new file mode 100644 index 0000000..f71069d --- /dev/null +++ b/docs/releasing.rst @@ -0,0 +1,127 @@ +========= +Releasing +========= + +Deployment +---------- + +A reminder for the **maintainers** on how to deploy. This section is only relevant when producing a new point release for the package. + +.. warning:: + + It is important to be aware that any changes to files found within the ``{{ cookiecutter.project_slug }}`` folder (with the exception of ``{{ cookiecutter.project_slug }}/__init__.py``) will trigger the ``bump-version.yml`` workflow. Be careful not to commit changes to files in this folder when preparing a new release. + +#. Create a new branch from `main` (e.g. `release-0.2.0`). +#. Update the `CHANGES.rst` file to change the `Unreleased` section to the current date. +#. Bump the version in your branch to the next version (e.g. `v0.1.0 -> v0.2.0`):: + + .. code-block:: console + + bump-my-version bump minor # In most cases, we will be releasing a minor version + git push + +#. Create a pull request from your branch to `main`. +#. Once the pull request is merged, create a new release on GitHub. On the main branch, run:: + + .. code-block:: console + + git tag v0.2.0 + git push --tags + + This will trigger a GitHub workflow to build the package and upload it to TestPyPI. At the same time, the GitHub workflow will create a draft release on GitHub. Assuming that the workflow passes, the final release can then be published on GitHub by finalizing the draft release. + +#. Once the release is published, the `publish-pypi.yml` workflow will go into an `awaiting approval` mode on Github Actions. Only authorized users may approve this workflow (notifications will be sent) to trigger the upload to PyPI. + +.. warning:: + + Uploads to PyPI can **never** be overwritten. If you make a mistake, you will need to bump the version and re-release the package. If the package uploaded to PyPI is broken, you should modify the GitHub release to mark the package as broken, as well as yank the package (mark the version "broken") on PyPI. + +Packaging +--------- + +When a new version has been minted (features have been successfully integrated test coverage and stability is adequate), maintainers should update the pip-installable package (wheel and source release) on PyPI as well as the binary on conda-forge. + +The simple approach +~~~~~~~~~~~~~~~~~~~ + +The simplest approach to packaging for general support (pip wheels) requires that ``flit`` be installed:: + + .. code-block:: console + + python -m pip install flit + +From the command line on your Linux distribution, simply run the following from the clone's main dev branch:: + + .. code-block:: console + + # To build the packages (sources and wheel) + make dist + + # To upload to PyPI + make release + +The new version based off of the version checked out will now be available via `pip` (`pip install {{ cookiecutter.project_slug }}`). 
+ +Releasing on conda-forge +~~~~~~~~~~~~~~~~~~~~~~~~ + +Initial Release +^^^^^^^^^^^^^^^ + +Before preparing an initial release on conda-forge, we *strongly* suggest consulting the following links: + * https://conda-forge.org/docs/maintainer/adding_pkgs.html + * https://github.com/conda-forge/staged-recipes + +In order to create a new conda build recipe, to be used when proposing packages to the conda-forge repository, we strongly suggest using the ``grayskull`` tool:: + + .. code-block:: console + + python -m pip install grayskull + grayskull pypi {{ cookiecutter.project_slug }} + +For more information on ``grayskull``, please see the following link: https://github.com/conda/grayskull + +Before updating the main conda-forge recipe, we echo the conda-forge documentation and *strongly* suggest performing the following checks: + * Ensure that dependencies and dependency versions correspond with those of the tagged version, with open or pinned versions for the `host` requirements. + * If possible, configure tests within the conda-forge build CI (e.g. `imports: {{ cookiecutter.project_slug }}`, `commands: pytest {{ cookiecutter.project_slug }}`). + +Subsequent releases +^^^^^^^^^^^^^^^^^^^ + +If the conda-forge feedstock recipe is built from PyPI, then when a new release is published on PyPI, `regro-cf-autotick-bot` will open Pull Requests automatically on the conda-forge feedstock. It is up to the conda-forge feedstock maintainers to verify that the package is building properly before merging the Pull Request to the main branch. + +Building sources for wide support with `manylinux` image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + This section is for building source files that link to or provide links to C/C++ dependencies. + It is not necessary to perform the following when building pure Python packages. + +In order to do ensure best compatibility across architectures, we suggest building wheels using the `PyPA`'s `manylinux` +docker images (at time of writing, we endorse using `manylinux_2_24_x86_64`). + +With `docker` installed and running, begin by pulling the image:: + + .. code-block:: console + + sudo docker pull quay.io/pypa/manylinux_2_24_x86_64 + +From the {{ cookiecutter.project_slug }} source folder we can enter into the docker container, providing access to the `{{ cookiecutter.project_slug }}` source files by linking them to the running image:: + + .. code-block:: console + + sudo docker run --rm -ti -v $(pwd):/{{ cookiecutter.project_slug }} -w /{{ cookiecutter.project_slug }} quay.io/pypa/manylinux_2_24_x86_64 bash + +Finally, to build the wheel, we run it against the provided Python3.9 binary:: + + .. code-block:: console + + /opt/python/cp39-cp39m/bin/python -m build --sdist --wheel + +This will then place two files in `{{ cookiecutter.project_slug }}/dist/` ("{{ cookiecutter.project_slug }}-1.2.3-py3-none-any.whl" and "{{ cookiecutter.project_slug }}-1.2.3.tar.gz"). +We can now leave our docker container (`exit`) and continue with uploading the files to PyPI:: + + .. 
code-block:: console + + python -m twine upload dist/* + From 365223bfe958647c793c41f67e7c7d19fbfe9981 Mon Sep 17 00:00:00 2001 From: Zeitsperre <10819524+Zeitsperre@users.noreply.github.com> Date: Mon, 13 May 2024 15:02:47 -0400 Subject: [PATCH 12/26] update authors and maintainers --- AUTHORS.rst | 9 +++++---- pyproject.toml | 14 ++++++++------ 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/AUTHORS.rst b/AUTHORS.rst index 27d92bb..082454a 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -4,14 +4,15 @@ Credits Development Lead ---------------- - * Anderson Banihirwe `@andersy005 `_ * David Huard `@huard `_ +Maintainers +----------- +* Abel Aoun `@bzah `_ +* Trevor James Smith `@Zeitsperre `_ + Contributors ------------- - -* Trevor James Smith `@Zeitsperre `_ * Pascal Bourgault `@Zeitsperre `_ * Francis Charette-Migneault `@fmigneault `_ -* Abel Aoun `@bzah `_ diff --git a/pyproject.toml b/pyproject.toml index 3077f54..247a97e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,9 @@ authors = [ ] maintainers = [ {name = "Anderson Banihirwe", email = "abanihi@ucar.edu"}, - {name = "David Huard"} + {name = "Abel Aoun", email = "aoun.abel@gmail.com"}, + {name = "David Huard", email = "david.huard@gmail.com"}, + {name = "Trevor James Smith", email = "trevor_smith@live.com"} ] description = "Tools for manipulating and opening NCML (NetCDF Markup) files with/for xarray" readme = {file = "README.rst", content-type = "text/x-rst"} @@ -33,13 +35,13 @@ classifiers = [ ] dynamic = ["version"] dependencies = [ - "xmltodict", - "xsdata", - "xarray", "cftime", - "netCDF4", "dask", - "psutil" + "netCDF4", + "psutil", + "xarray", + "xmltodict", + "xsdata" ] [project.optional-dependencies] From 9e066b735479b86da8c59f79a845e93e22e3160a Mon Sep 17 00:00:00 2001 From: Abel Aoun Date: Tue, 14 May 2024 14:31:54 +0200 Subject: [PATCH 13/26] enh: Upgrade via latest cookicutter template --- .cruft.json | 3 +- .gitignore | 3 ++ CHANGELOG.rst | 1 + CODE_OF_CONDUCT.rst | 84 +++++++++++++++++++++++++++++++++++++++ CONTRIBUTING.rst | 95 +++++++++++++++++++++++++++++++-------------- Makefile | 33 +++++++++++----- docs/index.rst | 1 + pyproject.toml | 66 ++++++++++++++++++++++--------- 8 files changed, 228 insertions(+), 58 deletions(-) create mode 100644 CODE_OF_CONDUCT.rst diff --git a/.cruft.json b/.cruft.json index ffd0733..ef8ff71 100644 --- a/.cruft.json +++ b/.cruft.json @@ -1,6 +1,6 @@ { "template": "https://github.com/Ouranosinc/cookiecutter-pypackage.git", - "commit": "f391bbd6ee14ab2478c64a1f78b74bd9903cae81", + "commit": "33f63b31dacfc75dcd4187c465b82d8868787e33", "checkout": null, "context": { "cookiecutter": { @@ -22,6 +22,7 @@ "create_author_file": "y", "open_source_license": "Apache Software License 2.0", "generated_with_cruft": "y", + "__gh_slug": "bzah/xncml", "_template": "https://github.com/Ouranosinc/cookiecutter-pypackage.git" } }, diff --git a/.gitignore b/.gitignore index 15dc0c6..7eeaee4 100644 --- a/.gitignore +++ b/.gitignore @@ -72,6 +72,9 @@ target/ # Jupyter Notebook .ipynb_checkpoints +# Dask worker cache +dask-worker-space/ + # pyenv .python-version diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ce1ea27..63b9260 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,4 @@ +========= Changelog ========= diff --git a/CODE_OF_CONDUCT.rst b/CODE_OF_CONDUCT.rst new file mode 100644 index 0000000..d11c563 --- /dev/null +++ b/CODE_OF_CONDUCT.rst @@ -0,0 +1,84 @@ +==================================== +Contributor Covenant Code of Conduct 
+==================================== + +Our Pledge +---------- + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +Our Standards +------------- + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +Our Responsibilities +-------------------- + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +Scope +----- + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. + +Enforcement +----------- + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at [INSERT EMAIL ADDRESS]. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +Attribution +----------- + +This Code of Conduct is adapted from the `Contributor Covenant`_, version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq + +.. 
_`Contributor Covenant`: https://www.contributor-covenant.org diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index ba0f0d6..bc3f832 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,3 +1,4 @@ +============ Contributing ============ @@ -11,7 +12,7 @@ Types of Contributions Report Bugs ~~~~~~~~~~~ -Report bugs at https://github.com/xarray-contrib/xncml/issues. +Report bugs at https://github.com/audreyr/xncml/issues. If you are reporting a bug, please include: @@ -37,7 +38,7 @@ xncml could always use more documentation, whether as part of the official xncml Submit Feedback ~~~~~~~~~~~~~~~ -The best way to send feedback is to file an issue at https://github.com/xarray-contrib/xncml/issues. +The best way to send feedback is to file an issue at https://github.com/audreyr/xncml/issues. If you are proposing a feature: @@ -53,51 +54,73 @@ Get Started! If you are new to using GitHub and `git`, please read `this guide `_ first. +.. warning:: + + Anaconda Python users: Due to the complexity of some packages, the default dependency solver can take a long time to resolve the environment. Consider running the following commands in order to speed up the process:: + + .. code-block:: console + + conda install -n base conda-libmamba-solver + conda config --set solver libmamba + + For more information, please see the following link: https://www.anaconda.com/blog/a-faster-conda-for-a-growing-community + + Alternatively, you can use the `mamba `_ package manager, which is a drop-in replacement for ``conda``. If you are already using `mamba`, replace the following commands with ``mamba`` instead of ``conda``. + Ready to contribute? Here's how to set up ``xncml`` for local development. #. Fork the ``xncml`` repo on GitHub. #. Clone your fork locally:: + .. code-block:: console + git clone git@github.com:your_name_here/xncml.git -#. Install your local copy into a development environment. Using ``virtualenv`` (``virtualenvwrapper``), you can create a new development environment with:: +#. Install your local copy into a development environment. You can create a new Anaconda development environment with:: - python -m pip install flit virtualenvwrapper - mkvirtualenv xncml - cd xncml/ - flit install --symlink + .. code-block:: console - This installs ``xncml`` in an "editable" state, meaning that changes to the code are immediately seen by the environment. + conda env create -f environment-dev.yml + conda activate xncml + make dev -#. To ensure a consistent coding style, install the ``pre-commit`` hooks to your local clone:: + This installs ``xncml`` in an "editable" state, meaning that changes to the code are immediately seen by the environment. - pre-commit install +#. To ensure a consistent coding style, ``make dev`` also installs the ``pre-commit`` hooks to your local clone:: - On commit, ``pre-commit`` will check that ``flake8``, and ``ruff`` checks are passing, perform automatic fixes if possible, and warn of violations that require intervention. If your commit fails the checks initially, simply fix the errors, re-add the files, and re-commit. + On commit, ``pre-commit`` will check that ``black``, ``blackdoc``, ``isort``, ``flake8``, and ``ruff`` checks are passing, perform automatic fixes if possible, and warn of violations that require intervention. If your commit fails the checks initially, simply fix the errors, re-add the files, and re-commit. You can also run the hooks manually with:: - pre-commit run -a + .. 
code-block:: console + + pre-commit run -a If you want to skip the ``pre-commit`` hooks temporarily, you can pass the ``--no-verify`` flag to `git commit`. #. Create a branch for local development:: - git checkout -b name-of-your-bugfix-or-feature + .. code-block:: console + + git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. #. When you're done making changes, we **strongly** suggest running the tests in your environment or with the help of ``tox``:: - python -m pytest + .. code-block:: console + make lint + python -m pytest # Or, to run multiple build tests - tox + python -m tox #. Commit your changes and push your branch to GitHub:: - git add . - git commit -m "Your detailed description of your changes." - git push origin name-of-your-bugfix-or-feature + .. code-block:: console + + git add . + git commit -m "Your detailed description of your changes." + git push origin name-of-your-bugfix-or-feature If ``pre-commit`` hooks fail, try re-committing your changes (or, if need be, you can skip them with `git commit --no-verify`). @@ -105,13 +128,15 @@ Ready to contribute? Here's how to set up ``xncml`` for local development. #. When pushing your changes to your branch on GitHub, the documentation will automatically be tested to reflect the changes in your Pull Request. This build process can take several minutes at times. If you are actively making changes that affect the documentation and wish to save time, you can compile and test your changes beforehand locally with:: + .. code-block:: console + # To generate the html and open it in your browser - make docs + make docs # To only generate the html - make autodoc - make -C docs html + make autodoc + make -C docs html # To simply test that the docs pass build checks - tox -e docs + python -m tox -e docs #. Once your Pull Request has been accepted and merged to the ``main`` branch, several automated workflows will be triggered: @@ -130,21 +155,33 @@ Before you submit a pull request, check that it meets these guidelines: #. If the pull request adds functionality, the docs should also be updated. Put your new functionality into a function with a docstring, and add the feature to the list in ``README.rst``. -#. The pull request should work for Python 3.8, 3.9, 3.10, 3.11, and 3.12. Check that the tests pass for all supported Python versions. +#. The pull request should work for Python 3.8, 3.9, 3.10, 3.11, 3.12 and PyPy. Check that the tests pass for all supported Python versions. Tips ---- To run a subset of tests:: + .. code-block:: console + pytest tests.test_xncml To run specific code style checks:: - black --check xncml tests - isort --check xncml tests - blackdoc --check xncml docs - ruff xncml tests - flake8 xncml tests + .. code-block:: console + + python -m black --check xncml tests + python -m isort --check xncml tests + python -m blackdoc --check xncml docs + python -m ruff xncml tests + python -m flake8 xncml tests + +To get ``black``, ``isort``, ``blackdoc``, ``ruff``, and ``flake8`` (with plugins ``flake8-alphabetize`` and ``flake8-rst-docstrings``) simply install them with `pip` (or `conda`) into your environment. + +Code of Conduct +--------------- + +Please note that this project is released with a `Contributor Code of Conduct`_. +By participating in this project you agree to abide by its terms. -To get ``black``, ``isort``, ``blackdoc``, ``ruff``, and ``flake8`` (with plugins ``flake8-alphabetize`` and ``flake8-rst-docstrings``) simply install them with `pip` into your environment. +.. 
_`Contributor Code of Conduct`: CODE_OF_CONDUCT.rst diff --git a/Makefile b/Makefile index b4649b6..a65d8e3 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: clean clean-build clean-pyc clean-test coverage dist docs help install lint lint/flake8 +.PHONY: clean clean-build clean-pyc clean-test coverage dist docs help install lint lint/flake8 lint/black .DEFAULT_GOAL := help define BROWSER_PYSCRIPT @@ -22,6 +22,7 @@ endef export PRINT_HELP_PYSCRIPT BROWSER := python -c "$$BROWSER_PYSCRIPT" +LOCALES := docs/locales help: @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) @@ -38,6 +39,7 @@ clean-build: ## remove build artifacts clean-docs: ## remove docs artifacts rm -f docs/apidoc/xncml*.rst rm -f docs/apidoc/modules.rst + rm -fr docs/locales/fr/LC_MESSAGES/*.mo $(MAKE) -C docs clean clean-pyc: ## remove Python file artifacts @@ -53,10 +55,15 @@ clean-test: ## remove test and coverage artifacts rm -fr .pytest_cache lint/flake8: ## check style with flake8 - ruff src/xncml tests - flake8 --config=.flake8 src/xncml tests + ruff xncml tests + flake8 --config=.flake8 xncml tests -lint: lint/flake8 ## check style +lint/black: ## check style with black + black --check xncml tests + blackdoc --check xncml docs + isort --check xncml tests + +lint: lint/flake8 lint/black ## check style test: ## run tests quickly with the default Python python -m pytest @@ -65,21 +72,28 @@ test-all: ## run tests on every Python version with tox tox coverage: ## check code coverage quickly with the default Python - coverage run --source src/xncml -m pytest + coverage run --source xncml -m pytest coverage report -m coverage html $(BROWSER) htmlcov/index.html +initialize-translations: clean-docs ## initialize translations, ignoring autodoc-generated files + ${MAKE} -C docs gettext + sphinx-intl update -p docs/_build/gettext -d docs/locales -l fr autodoc: clean-docs ## create sphinx-apidoc files: - sphinx-apidoc -o docs/apidoc --private --module-first src/xncml + sphinx-apidoc -o docs/apidoc --private --module-first xncml linkcheck: autodoc ## run checks over all external links found throughout the documentation $(MAKE) -C docs linkcheck docs: autodoc ## generate Sphinx HTML documentation, including API docs - $(MAKE) -C docs html + $(MAKE) -C docs html BUILDDIR="_build/html/en" +ifneq ("$(wildcard $(LOCALES))","") + ${MAKE} -C docs gettext + $(MAKE) -C docs html BUILDDIR="_build/html/fr" SPHINXOPTS="-D language='fr'" +endif ifndef READTHEDOCS - $(BROWSER) docs/_build/html/index.html + $(BROWSER) docs/_build/html/en/html/index.html endif servedocs: docs ## compile the docs watching for changes @@ -93,7 +107,8 @@ release: dist ## package and upload a release python -m flit publish dist/* install: clean ## install the package to the active Python's site-packages - python -m pip install .[all] + python -m pip install . 
dev: clean ## install the package to the active Python's site-packages python -m pip install --editable .[all] + pre-commit install diff --git a/docs/index.rst b/docs/index.rst index fa73bc9..88a9dd6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,6 +10,7 @@ Xncml Documentation readme tutorial contributing + releasing authors changelog diff --git a/pyproject.toml b/pyproject.toml index b190e30..7497897 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,11 +55,12 @@ dev = [ "flake8-rst-docstrings >=0.3.0", "flit >=3.9.0", "tox >=4.5.1", - "coverage >=6.2.2,<7.0.0", + "coverage >=7.0.0", "coveralls >=3.3.1", + "mypy", "pytest >=7.3.1", "pytest-cov >=4.0.0", - "ruff >=0.2.0", + "ruff >=0.3.0", "pre-commit >=3.3.2" ] docs = [ @@ -80,7 +81,15 @@ all = ["xncml[dev]", "xncml[docs]"] [project.urls] "Source" = "https://github.com/xarray-contrib/xncml" "Issue tracker" = "https://github.com/xarray-contrib/xncml/issues" -"Changelog" = "https://github.com/xarray-contrib/xncml/blob/master/CHANGELOG.md" +"Changelog" = "https://github.com/xarray-contrib/xncml/blob/main/CHANGELOG.rst" + +[tool.black] +target-version = [ + "py39", + "py310", + "py311", + "py312" +] [tool.bumpversion] current_version = "0.5.0" @@ -88,7 +97,7 @@ commit = true commit_args = "--no-verify" tag = false tag_name = "v{new_version}" -allow_dirty = false +allow_dirty = true parse = "(?P\\d+)\\.(?P\\d+)\\.(?P\\d+)(\\-(?P[a-z]+)(\\.(?P\\d+)))?" serialize = [ "{major}.{minor}.{patch}-{release}.{build}", @@ -134,6 +143,7 @@ include = [ "LICENSE", "Makefile", "README.rst", + "environment-dev.yml", "environment-docs.yml", "docs/_static/_images/*.gif", "docs/_static/_images/*.jpg", @@ -158,23 +168,45 @@ exclude = [ ".yamllint.yaml", "docs/_*", "docs/apidoc/modules.rst", - "docs/apidoc/xncml*.rst" + "docs/apidoc/xncml*.rst", + "docs/locales" ] +[tool.isort] +profile = "black" +py_version = 38 + [tool.mypy] -python_version = "3.9" +files = "." +python_version = 3.9 show_error_codes = true +strict = true +warn_no_return = true warn_return_any = true +warn_unreachable = true warn_unused_configs = true [[tool.mypy.overrides]] -module = [] +module = [ + # Don't require test functions to include types + "tests.*" +] +allow_untyped_defs = true +disable_error_code = "attr-defined" ignore_missing_imports = true +[tool.pytest.ini_options] +addopts = [ + "--verbose", + "--color=yes" +] +filterwarnings = ["ignore::UserWarning"] +testpaths = "tests" + [tool.ruff] -src = ["src"] +src = ["xncml"] line-length = 150 -target-version = "py39" +target-version = "py38" exclude = [ ".eggs", ".git", @@ -183,11 +215,14 @@ exclude = [ ] [tool.ruff.format] -# Enable reformatting of code snippets in docstrings. 
-docstring-code-format = true
 line-ending = "auto"

 [tool.ruff.lint]
+ignore = [
+  "D205",
+  "D400",
+  "D401"
+]
 select = [
   "C9",
   "D",
@@ -195,11 +230,6 @@ select = [
   "F",
   "W"
 ]
-ignore = [
-  "D205",
-  "D400",
-  "D401"
-]

 [tool.ruff.lint.flake8-bandit]
 check-typed-exception = true
@@ -215,10 +245,8 @@ max-complexity = 15

 [tool.ruff.lint.per-file-ignores]
+"docs/**" = ["E402"]
 "src/xncml/**/__init__.py" = ["F401", "F403"]
-"src/xncml/generated/*.py" = ["D"]
-"tests/**/*.py" = ["D"]
-"src/xncml/parser.py" = ["C901"] # To be added

 [tool.ruff.lint.pycodestyle]
 max-doc-length = 180
From 5c42528390a8013ba1943c4b41d8330ec7ea918b Mon Sep 17 00:00:00 2001
From: Abel Aoun 
Date: Tue, 14 May 2024 14:36:25 +0200
Subject: [PATCH 14/26] fix: lint

---
 docs/releasing.rst | 1 -
 pyproject.toml     | 3 +++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/docs/releasing.rst b/docs/releasing.rst
index f71069d..7737aa8 100644
--- a/docs/releasing.rst
+++ b/docs/releasing.rst
@@ -124,4 +124,3 @@ We can now leave our docker container (`exit`) and continue with uploading the f
     .. code-block:: console

         python -m twine upload dist/*
-
diff --git a/pyproject.toml b/pyproject.toml
index 7497897..c002f7e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -247,6 +247,9 @@ max-complexity = 15
 [tool.ruff.lint.per-file-ignores]
 "docs/**" = ["E402"]
 "src/xncml/**/__init__.py" = ["F401", "F403"]
+"src/xncml/generated/*.py" = ["D"]
+"tests/**/*.py" = ["D"]
+"src/xncml/parser.py" = ["C901"] # To be added

 [tool.ruff.lint.pycodestyle]
 max-doc-length = 180
From 328c28a68c2331d2f53ee2af7394c48b992b4962 Mon Sep 17 00:00:00 2001
From: Abel Aoun 
Date: Tue, 14 May 2024 15:02:57 +0200
Subject: [PATCH 15/26] fix: Makefile

---
 Makefile | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/Makefile b/Makefile
index a65d8e3..66b8db4 100644
--- a/Makefile
+++ b/Makefile
@@ -55,13 +55,13 @@ clean-test: ## remove test and coverage artifacts
 	rm -fr .pytest_cache

 lint/flake8: ## check style with flake8
-	ruff xncml tests
+	ruff check src tests
 	flake8 --config=.flake8 xncml tests

 lint/black: ## check style with black
-	black --check xncml tests
-	blackdoc --check xncml docs
-	isort --check xncml tests
+	black --check src/xncml tests
+	blackdoc --check src/xncml docs
+	isort --check src/xncml tests

 lint: lint/flake8 lint/black ## check style

 test: ## run tests quickly with the default Python
 	python -m pytest

 test-all: ## run tests on every Python version with tox
 	tox

 coverage: ## check code coverage quickly with the default Python
-	coverage run --source xncml -m pytest
+	coverage run --source src/xncml -m pytest
 	coverage report -m
 	coverage html
 	$(BROWSER) htmlcov/index.html
From 8579ce329e681cd4df301ae709453007631b7ca4 Mon Sep 17 00:00:00 2001
From: Abel Aoun 
Date: Tue, 14 May 2024 15:21:00 +0200
Subject: [PATCH 16/26] maint: migrate from black to ruff format

---
 Makefile       | 10 ++++------
 pyproject.toml | 14 ++------------
 2 files changed, 6 insertions(+), 18 deletions(-)

diff --git a/Makefile b/Makefile
index 66b8db4..dbd339a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: clean clean-build clean-pyc clean-test coverage dist docs help install lint lint/flake8 lint/black
+.PHONY: clean clean-build clean-pyc clean-test coverage dist docs help install lint lint/flake8 lint/format-check
 .DEFAULT_GOAL := help

 define BROWSER_PYSCRIPT
@@ -58,12 +58,10 @@ lint/flake8: ## check style with flake8
 	ruff check src tests
 	flake8 --config=.flake8 xncml tests

-lint/black: ## check style with black
-	black
--check src/xncml tests - blackdoc --check src/xncml docs - isort --check src/xncml tests +lint/format-check: ## check style with ruff format + ruff format --check src/xncml tests -lint: lint/flake8 lint/black ## check style +lint: lint/flake8 lint/format-check ## check style test: ## run tests quickly with the default Python python -m pytest diff --git a/pyproject.toml b/pyproject.toml index c002f7e..5573e2b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -83,14 +83,6 @@ all = ["xncml[dev]", "xncml[docs]"] "Issue tracker" = "https://github.com/xarray-contrib/xncml/issues" "Changelog" = "https://github.com/xarray-contrib/xncml/blob/main/CHANGELOG.rst" -[tool.black] -target-version = [ - "py39", - "py310", - "py311", - "py312" -] - [tool.bumpversion] current_version = "0.5.0" commit = true @@ -172,10 +164,6 @@ exclude = [ "docs/locales" ] -[tool.isort] -profile = "black" -py_version = 38 - [tool.mypy] files = "." python_version = 3.9 @@ -215,6 +203,8 @@ exclude = [ ] [tool.ruff.format] +# Enable reformatting of code snippets in docstrings. +docstring-code-format = true line-ending = "auto" [tool.ruff.lint] From 1aa518839ca5b4ca06e014fc6594a60867febff9 Mon Sep 17 00:00:00 2001 From: Zeitsperre <10819524+Zeitsperre@users.noreply.github.com> Date: Tue, 21 May 2024 13:55:51 -0400 Subject: [PATCH 17/26] fast-forward cookiecutter --- .cruft.json | 4 +- .github/PULL_REQUEST_TEMPLATE.md | 2 +- .github/dependabot.yml | 6 +- .github/workflows/bump-version.yml | 13 ++- .github/workflows/cache-cleaner.yml | 5 +- .github/workflows/dependency-review.yml | 10 +- .github/workflows/first-pull-request.yml | 5 +- .github/workflows/main.yml | 20 +++- .github/workflows/publish-pypi.yml | 9 +- .github/workflows/scorecard.yml | 13 ++- .github/workflows/tag-testpypi.yml | 13 +-- .github/workflows/workflow-warning.yml | 8 +- .pre-commit-config.yaml | 4 +- CHANGELOG.rst | 32 +++---- CODE_OF_CONDUCT.rst | 84 ----------------- CONTRIBUTING.rst | 113 ++++++++++++----------- Makefile | 10 +- docs/releasing.rst | 82 ++++++++-------- pyproject.toml | 10 +- tox.ini | 4 +- 20 files changed, 194 insertions(+), 253 deletions(-) delete mode 100644 CODE_OF_CONDUCT.rst diff --git a/.cruft.json b/.cruft.json index ef8ff71..cdcc843 100644 --- a/.cruft.json +++ b/.cruft.json @@ -1,6 +1,6 @@ { "template": "https://github.com/Ouranosinc/cookiecutter-pypackage.git", - "commit": "33f63b31dacfc75dcd4187c465b82d8868787e33", + "commit": "f9e0b049711af2023e9a3f5df594f4dbc25b07c1", "checkout": null, "context": { "cookiecutter": { @@ -22,7 +22,7 @@ "create_author_file": "y", "open_source_license": "Apache Software License 2.0", "generated_with_cruft": "y", - "__gh_slug": "bzah/xncml", + "__gh_slug": "https://github.com/bzah/xncml", "_template": "https://github.com/Ouranosinc/cookiecutter-pypackage.git" } }, diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index dc5f20e..6bc65ab 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -5,7 +5,7 @@ - This PR fixes #xyz - [ ] (If applicable) Documentation has been added / updated (for bug fixes / features). - [ ] (If applicable) Tests have been added. -- [ ] CHANGES.rst has been updated (with summary of main changes). +- [ ] CHANGELOG.rst has been updated (with summary of main changes). - [ ] Link to issue (:issue:`number`) and pull request (:pull:`number`) has been added. ### What kind of change does this PR introduce? 
diff --git a/.github/dependabot.yml b/.github/dependabot.yml index a9f2875..d86e88c 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,13 +3,11 @@ updates: - package-ecosystem: github-actions directory: / schedule: - interval: daily - time: '12:00' + interval: monthly open-pull-requests-limit: 10 - package-ecosystem: pip directory: / schedule: - interval: daily - time: '12:00' + interval: monthly open-pull-requests-limit: 10 diff --git a/.github/workflows/bump-version.yml b/.github/workflows/bump-version.yml index 9f57c0f..780d4e4 100644 --- a/.github/workflows/bump-version.yml +++ b/.github/workflows/bump-version.yml @@ -18,7 +18,7 @@ on: - .yamllint.yaml - .zenodo.json - AUTHORS.rst - - CHANGES.rst + - CHANGELOG.rst - CONTRIBUTING.rst - Makefile - .readthedocs.yml @@ -42,7 +42,7 @@ jobs: contents: write steps: - name: Harden Runner - uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 with: disable-sudo: true egress-policy: block @@ -50,10 +50,13 @@ jobs: files.pythonhosted.org:443 github.com:443 pypi.org:443 - - uses: actions/checkout@v4 + - name: Checkout Repository (no persist-credentials) + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: persist-credentials: false - - uses: actions/setup-python@v4 + fetch-depth: 0 + - name: Set up Python3 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: "3.x" - name: Config Commit Bot @@ -79,7 +82,7 @@ jobs: fi bump-my-version show-bump - name: Push Changes - uses: ad-m/github-push-action@master + uses: ad-m/github-push-action@d91a481090679876dfc4178fef17f286781251df # v0.8.0 with: force: false github_token: ${{ secrets.BUMP_VERSION_TOKEN }} diff --git a/.github/workflows/cache-cleaner.yml b/.github/workflows/cache-cleaner.yml index d48a7f6..9824374 100644 --- a/.github/workflows/cache-cleaner.yml +++ b/.github/workflows/cache-cleaner.yml @@ -16,7 +16,7 @@ jobs: actions: write steps: - name: Harden Runner - uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 with: disable-sudo: true egress-policy: block @@ -25,7 +25,8 @@ jobs: github.com:443 objects.githubusercontent.com:443 - - uses: actions/checkout@v4.1.1 + - name: Checkout Repository + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - name: Cleanup run: | diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index c977388..ecc6f22 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 with: disable-sudo: true egress-policy: block @@ -24,8 +24,8 @@ jobs: api.github.com:443 github.com:443 - - name: 'Checkout Repository' - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Checkout Repository + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - name: 'Dependency Review' - uses: actions/dependency-review-action@4901385134134e04cec5fbe5ddfe3b2c5bd5d976 + - name: Dependency Review + uses: actions/dependency-review-action@0c155c5e8556a497adf53f2c18edabf945ed8e70 
# v4.3.2 diff --git a/.github/workflows/first-pull-request.yml b/.github/workflows/first-pull-request.yml index dfaca22..ec70a48 100644 --- a/.github/workflows/first-pull-request.yml +++ b/.github/workflows/first-pull-request.yml @@ -11,14 +11,15 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 with: disable-sudo: true egress-policy: block allowed-endpoints: > api.github.com:443 - - uses: actions/github-script@v6 + - name: Verify Pull Request Opener + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 with: script: | // Get a list of all issues created by the PR opener diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d4d341f..6ba1704 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -6,7 +6,7 @@ on: - main paths-ignore: - .cruft.json - - CHANGES.rst + - CHANGELOG.rst - README.rst - pyproject.toml - tests/test_xncml.py @@ -30,9 +30,14 @@ jobs: python-version: - "3.x" steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + with: + egress-policy: audit + - name: Checkout Repository + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - name: Set up Python${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: ${{ matrix.python-version }} - name: Install tox @@ -58,9 +63,14 @@ jobs: - tox-env: "py312" python-version: "3.12" steps: - - uses: actions/checkout@v4 + - name: Harden Runner + uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + with: + egress-policy: audit + - name: Checkout Repository + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - name: Set up Python${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: ${{ matrix.python-version }} - name: Install tox diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index f91b018..0c30a2a 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -18,7 +18,7 @@ jobs: id-token: write steps: - name: Harden Runner - uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 with: disable-sudo: true egress-policy: block @@ -27,9 +27,10 @@ jobs: github.com:443 pypi.org:443 upload.pypi.org:443 - - uses: actions/checkout@v4 + - name: Checkout Repository + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - name: Set up Python3 - uses: actions/setup-python@v4 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: "3.x" - name: Install packaging libraries @@ -39,4 +40,4 @@ jobs: run: | python -m flit build - name: Publish distribution 📦 to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 + uses: pypa/gh-action-pypi-publish@81e9d935c883d0b210363ab89cf05f3894778450 # v1.8.14 diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index cb4f4ba..5b357a5 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -13,7 +13,7 
@@ on: - cron: '41 8 * * 4' push: branches: - - master + - main # Declare default permissions as read only. permissions: read-all @@ -36,20 +36,23 @@ jobs: allowed-endpoints: > api.github.com:443 api.osv.dev:443 + api.scorecard.dev:443 api.securityscorecards.dev:443 fulcio.sigstore.dev:443 github.com:443 + index.docker.io:443 oss-fuzz-build-logs.storage.googleapis.com:443 rekor.sigstore.dev:443 tuf-repo-cdn.sigstore.dev:443 www.bestpractices.dev:443 - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 + - name: Checkout Repository + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: persist-credentials: false - name: Run analysis - uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 + uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3 with: results_file: results.sarif results_format: sarif @@ -69,7 +72,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: Upload artifact - uses: actions/upload-artifact@694cdabd8bdb0f10b2cea11669e1bf5453eed0a6 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: SARIF file path: results.sarif @@ -77,6 +80,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: Upload to code-scanning - uses: github/codeql-action/upload-sarif@e5f05b81d5b6ff8cfa111c80c22c5fd02a384118 # 3.23.0 + uses: github/codeql-action/upload-sarif@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # 3.25.5 with: sarif_file: results.sarif diff --git a/.github/workflows/tag-testpypi.yml b/.github/workflows/tag-testpypi.yml index 13487a3..8f4835b 100644 --- a/.github/workflows/tag-testpypi.yml +++ b/.github/workflows/tag-testpypi.yml @@ -17,13 +17,13 @@ jobs: contents: write steps: - name: Harden Runner - uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 with: egress-policy: audit - name: Checkout code uses: actions/checkout@v4 - name: Create Release - uses: softprops/action-gh-release@v1 + uses: softprops/action-gh-release@69320dbe05506a9a39fc8ae11030b214ec2d1f87 # 2.0.5 env: # This token is provided by Actions, you do not need to create your own token GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -42,7 +42,7 @@ jobs: id-token: write steps: - name: Harden Runner - uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 with: disable-sudo: true egress-policy: block @@ -51,9 +51,10 @@ jobs: github.com:443 pypi.org:443 test.pypi.org:443 - - uses: actions/checkout@v4 + - name: Checkout Repository + uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - name: Set up Python3 - uses: actions/setup-python@v4 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: "3.x" - name: Install packaging libraries @@ -63,7 +64,7 @@ jobs: run: | python -m flit build - name: Publish distribution 📦 to Test PyPI - uses: pypa/gh-action-pypi-publish@release/v1 + uses: pypa/gh-action-pypi-publish@81e9d935c883d0b210363ab89cf05f3894778450 # v1.8.14 with: repository-url: https://test.pypi.org/legacy/ skip-existing: true diff --git a/.github/workflows/workflow-warning.yml b/.github/workflows/workflow-warning.yml index 433881b..a4887a7 100644 --- 
a/.github/workflows/workflow-warning.yml +++ b/.github/workflows/workflow-warning.yml @@ -26,14 +26,14 @@ jobs: pull-requests: write steps: - name: Harden Runner - uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 with: disable-sudo: true egress-policy: block allowed-endpoints: > api.github.com:443 - name: Find comment - uses: peter-evans/find-comment@a54c31d7fa095754bfef525c0c8e5e5674c4b4b1 # v2.4.0 + uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0 id: fc with: issue-number: ${{ github.event.pull_request.number }} @@ -45,7 +45,7 @@ jobs: (steps.fc.outputs.comment-id == '') && (!contains(github.event.pull_request.labels.*.name, 'approved')) && (github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name) - uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0 + uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0 with: comment-id: ${{ steps.fc.outputs.comment-id }} issue-number: ${{ github.event.pull_request.number }} @@ -57,7 +57,7 @@ jobs: - name: Update comment if: | contains(github.event.pull_request.labels.*.name, 'approved') - uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0 + uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0 with: comment-id: ${{ steps.fc.outputs.comment-id }} issue-number: ${{ github.event.pull_request.number }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e8db23a..c11e5b2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,7 @@ repos: - id: python-use-type-annotations - id: rst-inline-touching-normal - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.7 + rev: v0.4.4 hooks: - id: ruff args: [ --fix ] @@ -41,7 +41,7 @@ repos: - id: yamllint args: [ '--config-file=.yamllint.yaml' ] - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.28.2 + rev: 0.28.3 hooks: - id: check-github-workflows - id: check-readthedocs diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 63b9260..2a4a552 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,8 +5,8 @@ Changelog `Unreleased `_ (latest) ---------------------------------------------------------------------------- -- Added support for running `pytest` with `pytest-cov`. By @Zeitsperre -- Reworked the GitHub CI testing workflow to perform version checks as well as tests with `pytest-cov` . By @Zeitsperre +- Added support for running `pytest` with `pytest-cov` (by :user:`Zeitsperre`). +- Reworked the GitHub CI testing workflow to perform version checks as well as tests with `pytest-cov` (by :user:`Zeitsperre`). Breaking changes ^^^^^^^^^^^^^^^^ @@ -21,10 +21,10 @@ Breaking changes 0.4.0 (2024-01-08) ------------------ -- Add support for . By @bzah -- Update XSD schema and dataclasses to latest version from netcdf-java to add support for unsigned types. By @bzah -- Add support for scalar variables. By @Bzah -- [fix] empty attributes are now parsed into an empty string instead of crashing the parser. By @Bzah +- Add support for (by :user:`bzah`). +- Update XSD schema and dataclasses to latest version from netcdf-java to add support for unsigned types (by :user:`bzah`). +- Add support for scalar variables (by :user:`bzah`). 
+- [fix] empty attributes are now parsed into an empty string instead of crashing the parser (by :user:`bzah`). .. _changes-0.3.1: @@ -40,10 +40,10 @@ Breaking changes 0.3 (2023-08-28) ---------------- -- Add `add_aggregation` and `add_variable_agg` to `Dataset` class. By @huard -- Add `add_scan` to `Dataset` class. By @huard -- Closing the dataset returned by `open_ncml` will close the underlying opened files. By @huard -- Add `Dataset.from_text` classmethod to create a `Dataset` from an XML string. By @huard +- Add `add_aggregation` and `add_variable_agg` to `Dataset` class (by :user:`huard`). +- Add `add_scan` to `Dataset` class (by :user:`huard`). +- Closing the dataset returned by `open_ncml` will close the underlying opened files (by :user:`huard`). +- Add `Dataset.from_text` classmethod to create a `Dataset` from an XML string (by :user:`huard`). .. _changes-0.2: @@ -51,10 +51,10 @@ Breaking changes 0.2 (2023-02-23) ---------------- -- Implement `Dataset.rename_dataset_attribute`. By @huard -- Allow empty `Dataset` creation. By @huard -- Add support in `Dataset` for NcML documents using the `ncml` namespace. By @huard -- Implement `Dataset.to_cf_dict` method to export CF-JSON dictionary. By @huard. +- Implement `Dataset.rename_dataset_attribute` (by :user:`huard`). +- Allow empty `Dataset` creation (by :user:`huard`). +- Add support in `Dataset` for NcML documents using the `ncml` namespace (by :user:`huard`). +- Implement `Dataset.to_cf_dict` method to export CF-JSON dictionary (by :user:`huard`). .. _changes-0.1: @@ -62,6 +62,6 @@ Breaking changes 0.1 Initial release (2022-11-24) -------------------------------- - - Manipulate NcML file: add & remove attributes, variables and dimensions. By @andersy005 + - Manipulate NcML file: add & remove attributes, variables and dimensions. (by :user:`andersy005`). - Implement `open_ncml`, which returns an `xarray.Dataset` built from an NcML. Note that - Only a subset of the NcML syntax is supported. By @huard + Only a subset of the NcML syntax is supported. (by :user:`huard`). diff --git a/CODE_OF_CONDUCT.rst b/CODE_OF_CONDUCT.rst deleted file mode 100644 index d11c563..0000000 --- a/CODE_OF_CONDUCT.rst +++ /dev/null @@ -1,84 +0,0 @@ -==================================== -Contributor Covenant Code of Conduct -==================================== - -Our Pledge ----------- - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to make participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. 
- -Our Standards -------------- - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -Our Responsibilities --------------------- - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -Scope ------ - -This Code of Conduct applies within all project spaces, and it also applies when -an individual is representing the project or its community in public spaces. -Examples of representing a project or community include using an official -project e-mail address, posting via an official social media account, or acting -as an appointed representative at an online or offline event. Representation of -a project may be further defined and clarified by project maintainers. - -Enforcement ------------ - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at [INSERT EMAIL ADDRESS]. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -Attribution ------------ - -This Code of Conduct is adapted from the `Contributor Covenant`_, version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq - -.. _`Contributor Covenant`: https://www.contributor-covenant.org diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index bc3f832..56f2cf1 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -56,9 +56,9 @@ Get Started! .. warning:: - Anaconda Python users: Due to the complexity of some packages, the default dependency solver can take a long time to resolve the environment. Consider running the following commands in order to speed up the process:: + Anaconda Python users: Due to the complexity of some packages, the default dependency solver can take a long time to resolve the environment. 
Consider running the following commands in order to speed up the process:

-      .. code-block:: console
+    .. code-block:: console

         conda install -n base conda-libmamba-solver
         conda config --set solver libmamba

     For more information, please see the following link: https://www.anaconda.com/blog/a-faster-conda-for-a-growing-community

     Alternatively, you can use the `mamba `_ package manager, which is a drop-in replacement for ``conda``. If you are already using `mamba`, replace the following commands with ``mamba`` instead of ``conda``.

 Ready to contribute? Here's how to set up ``xncml`` for local development.

 #. Fork the ``xncml`` repo on GitHub.
-#. Clone your fork locally::
-
-   .. code-block:: console
+#. Clone your fork locally:

-      git clone git@github.com:your_name_here/xncml.git
+   .. code-block:: console

-#. Install your local copy into a development environment. You can create a new Anaconda development environment with::
+      git clone git@github.com:your_name_here/xncml.git

-   .. code-block:: console
+#. Install your local copy into a development environment. You can create a new development environment with:

-      conda env create -f environment-dev.yml
-      conda activate xncml
-      make dev
+   .. code-block:: console

-   This installs ``xncml`` in an "editable" state, meaning that changes to the code are immediately seen by the environment.
+      python -m pip install virtualenvwrapper
+      mkvirtualenv xncml
+      cd xncml/
+      make dev

-#. To ensure a consistent coding style, ``make dev`` also installs the ``pre-commit`` hooks to your local clone::
+   This installs ``xncml`` in an "editable" state, meaning that changes to the code are immediately seen by the environment. To ensure a consistent coding style, `make dev` also installs the ``pre-commit`` hooks to your local clone.

 On commit, ``pre-commit`` will check that ``black``, ``blackdoc``, ``isort``, ``flake8``, and ``ruff`` checks are passing, perform automatic fixes if possible, and warn of violations that require intervention. If your commit fails the checks initially, simply fix the errors, re-add the files, and re-commit.

-   You can also run the hooks manually with::
+   You can also run the hooks manually with:

-   .. code-block:: console
+   .. code-block:: console

-      pre-commit run -a
+      pre-commit run -a

 If you want to skip the ``pre-commit`` hooks temporarily, you can pass the ``--no-verify`` flag to `git commit`.

-#. Create a branch for local development::
+#. Create a branch for local development:

-   git checkout -b name-of-your-bugfix-or-feature
+   .. code-block:: console

+      git checkout -b name-of-your-bugfix-or-feature

 Now you can make your changes locally.

-#. When you're done making changes, we **strongly** suggest running the tests in your environment or with the help of ``tox``::
+#. When you're done making changes, we **strongly** suggest running the tests in your environment or with the help of ``tox``:

-   python -m pytest
+   .. code-block:: console
+      make lint
+      python -m pytest
    # Or, to run multiple build tests
-   tox
+      python -m tox

-#. Commit your changes and push your branch to GitHub::
+#. Commit your changes and push your branch to GitHub:

-   git add .
-   git commit -m "Your detailed description of your changes."
-   git push origin name-of-your-bugfix-or-feature
+   .. code-block:: console

+      git add .
+      git commit -m "Your detailed description of your changes."
+      git push origin name-of-your-bugfix-or-feature

-   If ``pre-commit`` hooks fail, try re-committing your changes (or, if need be, you can skip them with `git commit --no-verify`).
+   If ``pre-commit`` hooks fail, try re-committing your changes (or, if need be, you can skip them with `git commit --no-verify`).

 #. 
Submit a `Pull Request `_ through the GitHub website. -#. When pushing your changes to your branch on GitHub, the documentation will automatically be tested to reflect the changes in your Pull Request. This build process can take several minutes at times. If you are actively making changes that affect the documentation and wish to save time, you can compile and test your changes beforehand locally with:: +#. When pushing your changes to your branch on GitHub, the documentation will automatically be tested to reflect the changes in your Pull Request. This build process can take several minutes at times. If you are actively making changes that affect the documentation and wish to save time, you can compile and test your changes beforehand locally with: - .. code-block:: console + .. code-block:: console - # To generate the html and open it in your browser - make docs - # To only generate the html - make autodoc - make -C docs html - # To simply test that the docs pass build checks - python -m tox -e docs + # To generate the html and open it in your browser + make docs + # To only generate the html + make autodoc + make -C docs html + # To simply test that the docs pass build checks + python -m tox -e docs -#. Once your Pull Request has been accepted and merged to the ``main`` branch, several automated workflows will be triggered: +#. Once your Pull Request has been accepted and merged to the `main` branch, several automated workflows will be triggered: - - The ``bump-version.yml`` workflow will automatically bump the patch version when pull requests are pushed to the ``main`` branch on GitHub. **It is not recommended to manually bump the version in your branch when merging (non-release) pull requests (this will cause the version to be bumped twice).** + - The ``bump-version.yml`` workflow will automatically bump the patch version when pull requests are pushed to the `main` branch on GitHub. **It is not recommended to manually bump the version in your branch when merging (non-release) pull requests (this will cause the version to be bumped twice).** - `ReadTheDocs` will automatically build the documentation and publish it to the `latest` branch of `xncml` documentation website. - If your branch is not a fork (ie: you are a maintainer), your branch will be automatically deleted. - You will have contributed your first changes to ``xncml``! + You will have contributed your first changes to ``xncml``! Pull Request Guidelines ----------------------- Before you submit a pull request, check that it meets these guidelines: -#. The pull request should include tests and should aim to provide `code coverage `_ for all new lines of code. You can use the ``--cov-report html --cov xncml`` flags during the call to ``pytest`` to generate an HTML report and analyse the current test coverage. +#. The pull request should include tests and should aim to provide `code coverage `_ for all new lines of code. You can use the `--cov-report html --cov xncml` flags during the call to ``pytest`` to generate an HTML report and analyse the current test coverage. #. If the pull request adds functionality, the docs should also be updated. Put your new functionality into a function with a docstring, and add the feature to the list in ``README.rst``. -#. The pull request should work for Python 3.8, 3.9, 3.10, 3.11, 3.12 and PyPy. Check that the tests pass for all supported Python versions. +#. The pull request should work for Python 3.9, 3.10, 3.11, and 3.12. Check that the tests pass for all supported Python versions. 
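+
+   To check the test suite against several supported interpreters locally before opening the pull request, ``tox`` can be used (a sketch; the environment names below assume interpreters for the listed Python versions are installed on your system):
+
+   .. code-block:: console
+
+       python -m tox -e py39,py310,py311,py312
+
+   Any version missing from your system can simply be dropped from the comma-separated list.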
Tips ---- -To run a subset of tests:: +To run a subset of tests: - .. code-block:: console +.. code-block:: console + + python -m pytest tests/test_xncml.py + +You can also directly call a specific test class or test function using: -pytest tests.test_xncml +.. code-block:: console -To run specific code style checks:: + python -m pytest tests/test_xncml.py::TestClassName::test_function_name + +For more information on running tests, see the `pytest documentation `_. + +To run specific code style checks: .. code-block:: console python -m black --check xncml tests python -m isort --check xncml tests python -m blackdoc --check xncml docs - python -m ruff xncml tests + python -m ruff check xncml tests python -m flake8 xncml tests -To get ``black``, ``isort``, ``blackdoc``, ``ruff``, and ``flake8`` (with plugins ``flake8-alphabetize`` and ``flake8-rst-docstrings``) simply install them with `pip` (or `conda`) into your environment. +To get ``black``, ``isort``, ``blackdoc``, ``ruff``, and ``flake8`` (with plugins ``flake8-alphabetize`` and ``flake8-rst-docstrings``) simply install them with ``pip`` (or ``conda``) into your environment. Code of Conduct --------------- -Please note that this project is released with a `Contributor Code of Conduct`_. +Please note that this project is released with a `Contributor Code of Conduct `_. By participating in this project you agree to abide by its terms. - -.. _`Contributor Code of Conduct`: CODE_OF_CONDUCT.rst diff --git a/Makefile b/Makefile index dbd339a..383a255 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,7 @@ clean-test: ## remove test and coverage artifacts lint/flake8: ## check style with flake8 ruff check src tests - flake8 --config=.flake8 xncml tests + flake8 --config=.flake8 src/xncml tests lint/format-check: ## check style with ruff format ruff format --check src/xncml tests @@ -70,16 +70,16 @@ test-all: ## run tests on every Python version with tox tox coverage: ## check code coverage quickly with the default Python - coverage run --source src/xncml -m pytest - coverage report -m - coverage html + python -m coverage run --source src/xncml -m pytest + python -m coverage report -m + python -m coverage html $(BROWSER) htmlcov/index.html initialize-translations: clean-docs ## initialize translations, ignoring autodoc-generated files ${MAKE} -C docs gettext sphinx-intl update -p docs/_build/gettext -d docs/locales -l fr autodoc: clean-docs ## create sphinx-apidoc files: - sphinx-apidoc -o docs/apidoc --private --module-first xncml + sphinx-apidoc -o docs/apidoc --private --module-first src/xncml linkcheck: autodoc ## run checks over all external links found throughout the documentation $(MAKE) -C docs linkcheck diff --git a/docs/releasing.rst b/docs/releasing.rst index 7737aa8..1365609 100644 --- a/docs/releasing.rst +++ b/docs/releasing.rst @@ -9,32 +9,33 @@ A reminder for the **maintainers** on how to deploy. This section is only releva .. warning:: - It is important to be aware that any changes to files found within the ``{{ cookiecutter.project_slug }}`` folder (with the exception of ``{{ cookiecutter.project_slug }}/__init__.py``) will trigger the ``bump-version.yml`` workflow. Be careful not to commit changes to files in this folder when preparing a new release. + It is important to be aware that any changes to files found within the ``src/xncml`` folder (with the exception of ``src/xncml/__init__.py``) will trigger the ``bump-version.yml`` workflow. Be careful not to commit changes to files in this folder when preparing a new release. 
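+
+Before bumping, the pending version change can be previewed with ``bump-my-version`` (a sketch; this is the same command the ``bump-version.yml`` workflow runs to report versions):
+
+.. code-block:: console
+
+    bump-my-version show-bump
+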
#. Create a new branch from `main` (e.g. `release-0.2.0`).
-#. Update the `CHANGES.rst` file to change the `Unreleased` section to the current date.
+#. Update the `CHANGELOG.rst` file to change the `Unreleased` section to the current date.
-#. Bump the version in your branch to the next version (e.g. `v0.1.0 -> v0.2.0`)::
+#. Bump the version in your branch to the next version (e.g. `v0.1.0 -> v0.2.0`):

-    .. code-block:: console
+    .. code-block:: console

-    bump-my-version bump minor # In most cases, we will be releasing a minor version
-    git push
+        bump-my-version bump minor # In most cases, we will be releasing a minor version
+        bump-my-version bump release # This will update the version strings to drop the `dev` suffix
+        git push

 #. Create a pull request from your branch to `main`.
-#. Once the pull request is merged, create a new release on GitHub. On the main branch, run::
+#. Once the pull request is merged, create a new release on GitHub. On the `main` branch, run:

-    .. code-block:: console
+    .. code-block:: console

-    git tag v0.2.0
-    git push --tags
+        git tag v0.2.0
+        git push --tags

-    This will trigger a GitHub workflow to build the package and upload it to TestPyPI. At the same time, the GitHub workflow will create a draft release on GitHub. Assuming that the workflow passes, the final release can then be published on GitHub by finalizing the draft release.
+    This will trigger a GitHub workflow to build the package and upload it to TestPyPI. At the same time, the GitHub workflow will create a draft release on GitHub. Assuming that the workflow passes, the final release can then be published on GitHub by finalizing the draft release.

 #. Once the release is published, the `publish-pypi.yml` workflow will go into an `awaiting approval` mode on GitHub Actions. Only authorized users may approve this workflow (notifications will be sent) to trigger the upload to PyPI.

 .. warning::

-    Uploads to PyPI can **never** be overwritten. If you make a mistake, you will need to bump the version and re-release the package. If the package uploaded to PyPI is broken, you should modify the GitHub release to mark the package as broken, as well as yank the package (mark the version "broken") on PyPI.
+    Uploads to PyPI can **never** be overwritten. If you make a mistake, you will need to bump the version and re-release the package. If the package uploaded to PyPI is broken, you should modify the GitHub release to mark the package as broken, as well as yank the package (mark the version "broken") on PyPI.

 Packaging
 ---------
@@ -44,21 +45,21 @@ When a new version has been minted (features have been successfully integrated t

 The simple approach
 ~~~~~~~~~~~~~~~~~~~

-The simplest approach to packaging for general support (pip wheels) requires that ``flit`` be installed::
+The simplest approach to packaging for general support (pip wheels) requires that `flit` be installed:

-    .. code-block:: console
+    .. code-block:: console

-    python -m pip install flit
+        python -m pip install flit

-From the command line on your Linux distribution, simply run the following from the clone's main dev branch::
+From the command line on your Linux distribution, simply run the following from the clone's main dev branch:

-    .. code-block:: console
+    .. code-block:: console

-    # To build the packages (sources and wheel)
-    make dist
+        # To build the packages (sources and wheel)
+        make dist

-    # To upload to PyPI
-    make release
+        # To upload to PyPI
+        make release

 The new version based off of the version checked out will now be available via `pip` (`pip install {{ cookiecutter.project_slug }}`).

 Releasing on conda-forge
 ~~~~~~~~~~~~~~~~~~~~~~~~
@@ -72,14 +73,14 @@ Before preparing an initial release on conda-forge, we *strongly* suggest consul

 * https://conda-forge.org/docs/maintainer/adding_pkgs.html
 * https://github.com/conda-forge/staged-recipes

-In order to create a new conda build recipe, to be used when proposing packages to the conda-forge repository, we strongly suggest using the ``grayskull`` tool::
+In order to create a new conda build recipe, to be used when proposing packages to the conda-forge repository, we strongly suggest using the `grayskull` tool:

-    .. code-block:: console
+    .. code-block:: console

-    python -m pip install grayskull
-    grayskull pypi {{ cookiecutter.project_slug }}
+        python -m pip install grayskull
+        grayskull pypi xncml

-For more information on ``grayskull``, please see the following link: https://github.com/conda/grayskull
+For more information on `grayskull`, please see the following link: https://github.com/conda/grayskull

 Before updating the main conda-forge recipe, we echo the conda-forge documentation and *strongly* suggest performing the following checks:

 * Ensure that dependencies and dependency versions correspond with those of the tagged version, with open or pinned versions for the `host` requirements.
 * If possible, configure tests within the conda-forge build CI (e.g. `imports: {{ cookiecutter.project_slug }}`, `commands: pytest {{ cookiecutter.project_slug }}`).

@@ -97,30 +98,31 @@ Building sources for wide support with `manylinux` image

 This section is for building source files that link to or provide links to C/C++ dependencies. It is not necessary to perform the following when building pure Python packages.

-In order to do ensure best compatibility across architectures, we suggest building wheels using the `PyPA`'s `manylinux`
-docker images (at time of writing, we endorse using `manylinux_2_24_x86_64`).
+In order to ensure best compatibility across architectures, we suggest building wheels using the `PyPA`'s `manylinux` docker images (at time of writing, we endorse using `manylinux_2_24_x86_64`).

-With `docker` installed and running, begin by pulling the image::
+With `docker` installed and running, begin by pulling the image:

-    .. code-block:: console
+    .. code-block:: console

-    sudo docker pull quay.io/pypa/manylinux_2_24_x86_64
+        sudo docker pull quay.io/pypa/manylinux_2_24_x86_64

-From the {{ cookiecutter.project_slug }} source folder we can enter into the docker container, providing access to the `{{ cookiecutter.project_slug }}` source files by linking them to the running image::
+From the xncml source folder we can enter into the docker container, providing access to the `src/xncml` source files by linking them to the running image:

-    .. code-block:: console
+    .. code-block:: console

-    sudo docker run --rm -ti -v $(pwd):/{{ cookiecutter.project_slug }} -w /{{ cookiecutter.project_slug }} quay.io/pypa/manylinux_2_24_x86_64 bash
+        sudo docker run --rm -ti -v $(pwd):/src/xncml -w /src/xncml quay.io/pypa/manylinux_2_24_x86_64 bash

-Finally, to build the wheel, we run it against the provided Python3.9 binary::
-    .. code-block:: console
+Finally, to build the wheel, we run it against the provided Python3.9 binary:

-    /opt/python/cp39-cp39m/bin/python -m build --sdist --wheel
+    .. code-block:: console
+
+        /opt/python/cp39-cp39m/bin/python -m build --sdist --wheel

-This will then place two files in `{{ cookiecutter.project_slug }}/dist/` ("{{ cookiecutter.project_slug }}-1.2.3-py3-none-any.whl" and "{{ cookiecutter.project_slug }}-1.2.3.tar.gz").
-We can now leave our docker container (`exit`) and continue with uploading the files to PyPI::
+This will then place two files in `src/xncml/dist/` ("xncml-1.2.3-py3-none-any.whl" and "xncml-1.2.3.tar.gz").
+We can now leave our docker container (`exit`) and continue with uploading the files to PyPI:

-    .. code-block:: console
-
-    python -m twine upload dist/*
+    .. code-block:: console
+
+        python -m twine upload dist/*
diff --git a/pyproject.toml b/pyproject.toml
index 5573e2b..e5fa370 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,5 @@
 [build-system]
-requires = ["flit_core >=3.8,<4"]
+requires = ["flit_core >=3.9,<4"]
 build-backend = "flit_core.buildapi"

 [project]
@@ -47,7 +47,7 @@ dependencies = [
 [project.optional-dependencies]
 dev = [
   # Dev tools and testing
-  "pip >=23.1.2",
+  "pip >=23.3.0",
   "bump-my-version >=0.18.3",
   "watchdog >=3.0.0",
   "flake8 >=6.1.0",
@@ -65,7 +65,7 @@ dev = [
 ]
 docs = [
   # Documentation and examples
-  "sphinx",
+  "sphinx >=7.0.0",
   "sphinx-codeautolink",
   "sphinx-copybutton",
   "sphinx-rtd-theme >=1.0",
@@ -144,9 +144,9 @@ include = [
   "docs/Makefile",
   "docs/conf.py",
   "docs/make.bat",
+  "src/xncml",
   "tests/*.py",
-  "tox.ini",
-  "xncml"
+  "tox.ini"
 ]
 exclude = [
   "*.py[co]",
diff --git a/tox.ini b/tox.ini
index a69f3c6..4e828fb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,7 +6,7 @@ envlist =
     docs
     coveralls
 requires =
-    flit
+    flit >= 3.9.0
     pip >= 23.3.0
 opts =
     --verbose
@@ -15,7 +15,7 @@ opts =
 skip_install = True
 deps =
     flake8
-    ruff >=0.2.0
+    ruff >=0.3.0
 commands =
     make lint
 allowlist_externals =

From 51ffa68e1e3b46a0d916fb94f7e5f8e53ac1164d Mon Sep 17 00:00:00 2001
From: Zeitsperre <10819524+Zeitsperre@users.noreply.github.com>
Date: Tue, 21 May 2024 14:02:45 -0400
Subject: [PATCH 18/26] last adjustments

---
 CONTRIBUTING.rst | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 56f2cf1..9c6d3d1 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -175,13 +175,13 @@ For more information on running tests, see the `pytest documentation

Date: Fri, 5 Jul 2024 10:40:09 -0400
Subject: [PATCH 19/26] add CODE_OF_CONDUCT.md

---
 CODE_OF_CONDUCT.md | 133 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 133 insertions(+)
 create mode 100644 CODE_OF_CONDUCT.md

diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..894a602
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,133 @@
+
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official email address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +[xarray-core-team@googlegroups.com](mailto:xarray-core-team@googlegroups.com). +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations From 71e83c98b123fef187d4c185bafbb729997d01bc Mon Sep 17 00:00:00 2001 From: Trevor James Smith <10819524+Zeitsperre@users.noreply.github.com> Date: Fri, 5 Jul 2024 11:30:26 -0400 Subject: [PATCH 20/26] fast-forward cookiecutter template --- .cruft.json | 6 +- .flake8 | 4 - .github/dependabot.yml | 2 +- .github/workflows/bump-version.yml | 4 +- .github/workflows/cache-cleaner.yml | 2 +- .github/workflows/dependency-review.yml | 5 +- .github/workflows/first-pull-request.yml | 2 +- .github/workflows/main.yml | 4 +- .github/workflows/publish-pypi.yml | 6 +- .github/workflows/scorecard.yml | 10 +- .github/workflows/tag-testpypi.yml | 12 +-- .github/workflows/workflow-warning.yml | 8 +- .pre-commit-config.yaml | 112 +++++++++++++---------- CONTRIBUTING.rst | 69 ++++++++++---- docs/conf.py | 1 + docs/index.rst | 16 +--- docs/releasing.rst | 4 +- pyproject.toml | 78 +++++++++++----- src/xncml/__init__.py | 21 ++++- tox.ini | 9 +- 20 files changed, 233 insertions(+), 142 deletions(-) diff --git a/.cruft.json b/.cruft.json index cdcc843..131ad09 100644 --- a/.cruft.json +++ b/.cruft.json @@ -1,12 +1,12 @@ { "template": "https://github.com/Ouranosinc/cookiecutter-pypackage.git", - "commit": "f9e0b049711af2023e9a3f5df594f4dbc25b07c1", + "commit": "3b3598ae9524e7eb495bcba91093fac3369e753a", "checkout": null, "context": { "cookiecutter": { "full_name": "Abel Aoun", "email": "aoun.abel@gmail.com", - "github_username": "bzah", + "github_username": "xarray-contrib", "project_name": "xncml", "project_slug": "xncml", "project_short_description": "Tools for manipulating NcML (NetCDF Markup Language) files with/for xarray", @@ -22,7 +22,7 @@ "create_author_file": "y", "open_source_license": "Apache Software License 2.0", "generated_with_cruft": "y", - "__gh_slug": "https://github.com/bzah/xncml", + "__gh_slug": "https://github.com/xarray-contrib/xncml", "_template": "https://github.com/Ouranosinc/cookiecutter-pypackage.git" } }, diff --git a/.flake8 b/.flake8 index 79b2dbd..2112ec1 100644 --- 
a/.flake8 +++ b/.flake8 @@ -6,10 +6,6 @@ exclude = docs, tests ignore = - AZ100, - AZ200, - AZ300, - AZ400 C, D, E, diff --git a/.github/dependabot.yml b/.github/dependabot.yml index d86e88c..56265d3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,7 +1,7 @@ version: 2 updates: - package-ecosystem: github-actions - directory: / + directory: /.github/workflows/ schedule: interval: monthly open-pull-requests-limit: 10 diff --git a/.github/workflows/bump-version.yml b/.github/workflows/bump-version.yml index 780d4e4..27ca638 100644 --- a/.github/workflows/bump-version.yml +++ b/.github/workflows/bump-version.yml @@ -42,7 +42,7 @@ jobs: contents: write steps: - name: Harden Runner - uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1 with: disable-sudo: true egress-policy: block @@ -51,7 +51,7 @@ jobs: github.com:443 pypi.org:443 - name: Checkout Repository (no persist-credentials) - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: persist-credentials: false fetch-depth: 0 diff --git a/.github/workflows/cache-cleaner.yml b/.github/workflows/cache-cleaner.yml index 9824374..bb6cefa 100644 --- a/.github/workflows/cache-cleaner.yml +++ b/.github/workflows/cache-cleaner.yml @@ -16,7 +16,7 @@ jobs: actions: write steps: - name: Harden Runner - uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1 with: disable-sudo: true egress-policy: block diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index ecc6f22..0a53ecc 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -16,16 +16,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1 with: disable-sudo: true egress-policy: block allowed-endpoints: > + api.deps.dev:443 api.github.com:443 github.com:443 - name: Checkout Repository - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Dependency Review uses: actions/dependency-review-action@0c155c5e8556a497adf53f2c18edabf945ed8e70 # v4.3.2 diff --git a/.github/workflows/first-pull-request.yml b/.github/workflows/first-pull-request.yml index ec70a48..a747849 100644 --- a/.github/workflows/first-pull-request.yml +++ b/.github/workflows/first-pull-request.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1 with: disable-sudo: true egress-policy: block diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 6ba1704..b05f32d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -64,11 +64,11 @@ jobs: python-version: "3.12" steps: - name: Harden Runner - uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1 with: egress-policy: audit - 
name: Checkout Repository - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up Python${{ matrix.python-version }} uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 0c30a2a..66e6816 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -18,7 +18,7 @@ jobs: id-token: write steps: - name: Harden Runner - uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1 with: disable-sudo: true egress-policy: block @@ -28,7 +28,7 @@ jobs: pypi.org:443 upload.pypi.org:443 - name: Checkout Repository - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up Python3 uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: @@ -40,4 +40,4 @@ jobs: run: | python -m flit build - name: Publish distribution 📦 to PyPI - uses: pypa/gh-action-pypi-publish@81e9d935c883d0b210363ab89cf05f3894778450 # v1.8.14 + uses: pypa/gh-action-pypi-publish@ec4db0b4ddc65acdf4bff5fa45ac92d78b56bdf0 # v1.9.0 diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 5b357a5..32aef91 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -29,7 +29,7 @@ jobs: id-token: write steps: - name: Harden Runner - uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1 + uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1 with: disable-sudo: true egress-policy: block @@ -47,11 +47,11 @@ jobs: www.bestpractices.dev:443 - name: Checkout Repository - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: persist-credentials: false - - name: Run analysis + - name: Run Analysis uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3 with: results_file: results.sarif @@ -71,7 +71,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - - name: Upload artifact + - name: Upload Artifact uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: SARIF file @@ -80,6 +80,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: Upload to code-scanning - uses: github/codeql-action/upload-sarif@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # 3.25.5 + uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # 3.25.11 with: sarif_file: results.sarif diff --git a/.github/workflows/tag-testpypi.yml b/.github/workflows/tag-testpypi.yml index 8f4835b..299e8bb 100644 --- a/.github/workflows/tag-testpypi.yml +++ b/.github/workflows/tag-testpypi.yml @@ -17,11 +17,11 @@ jobs: contents: write steps: - name: Harden Runner - uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1 with: egress-policy: audit - - name: Checkout code - uses: actions/checkout@v4 + - name: Checkout Repository + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Create Release uses: softprops/action-gh-release@69320dbe05506a9a39fc8ae11030b214ec2d1f87 # 2.0.5 env: @@ -42,7 +42,7 @@ jobs: id-token: write steps: - name: Harden Runner - uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1 with: disable-sudo: true egress-policy: block @@ -52,7 +52,7 @@ jobs: pypi.org:443 test.pypi.org:443 - name: Checkout Repository - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up Python3 uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: @@ -64,7 +64,7 @@ jobs: run: | python -m flit build - name: Publish distribution 📦 to Test PyPI - uses: pypa/gh-action-pypi-publish@81e9d935c883d0b210363ab89cf05f3894778450 # v1.8.14 + uses: pypa/gh-action-pypi-publish@ec4db0b4ddc65acdf4bff5fa45ac92d78b56bdf0 # v1.9.0 with: repository-url: https://test.pypi.org/legacy/ skip-existing: true diff --git a/.github/workflows/workflow-warning.yml b/.github/workflows/workflow-warning.yml index a4887a7..690da1c 100644 --- a/.github/workflows/workflow-warning.yml +++ b/.github/workflows/workflow-warning.yml @@ -26,13 +26,13 @@ jobs: pull-requests: write steps: - name: Harden Runner - uses: step-security/harden-runner@a4aa98b93cab29d9b1101a6143fb8bce00e2eac4 # v2.7.1 + uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1 with: disable-sudo: true egress-policy: block allowed-endpoints: > api.github.com:443 - - name: Find comment + - name: Find Comment uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0 id: fc with: @@ -40,7 +40,7 @@ jobs: comment-author: 'github-actions[bot]' body-includes: | This Pull Request modifies GitHub workflows and is coming from a fork. - - name: Create comment + - name: Create Comment if: | (steps.fc.outputs.comment-id == '') && (!contains(github.event.pull_request.labels.*.name, 'approved')) && @@ -54,7 +54,7 @@ jobs: > This Pull Request modifies GitHub Workflows and is coming from a fork. 
**It is very important for the reviewer to ensure that the workflow changes are appropriate.** edit-mode: replace - - name: Update comment + - name: Update Comment if: | contains(github.event.pull_request.labels.*.name, 'approved') uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c11e5b2..43c16f5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,50 +2,68 @@ default_language_version: python: python3 repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 - hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-docstring-first - - id: check-json - - id: check-yaml - - id: check-toml - - repo: https://github.com/pappasam/toml-sort - rev: v0.23.1 - hooks: - - id: toml-sort-fix - - repo: https://github.com/pre-commit/pygrep-hooks - rev: v1.10.0 - hooks: - - id: python-check-blanket-noqa - - id: python-no-eval - - id: python-no-log-warn - - id: python-use-type-annotations - - id: rst-inline-touching-normal - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.4 - hooks: - - id: ruff - args: [ --fix ] - - id: ruff-format - - repo: https://github.com/pycqa/flake8 - rev: 7.0.0 - hooks: - - id: flake8 - additional_dependencies: [ 'flake8-alphabetize', 'flake8-rst-docstrings' ] - args: [ '--config=.flake8' ] - - repo: https://github.com/adrienverge/yamllint.git - rev: v1.35.1 - hooks: - - id: yamllint - args: [ '--config-file=.yamllint.yaml' ] - - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.28.3 - hooks: - - id: check-github-workflows - - id: check-readthedocs - - repo: meta - hooks: - - id: check-hooks-apply - - id: check-useless-excludes + - repo: https://github.com/asottile/pyupgrade + rev: v3.16.0 + hooks: + - id: pyupgrade + args: [ '--py39-plus' ] + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.6.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-docstring-first + - id: check-json + - id: check-yaml + - id: check-toml + - repo: https://github.com/pappasam/toml-sort + rev: v0.23.1 + hooks: + - id: toml-sort-fix + - repo: https://github.com/pre-commit/pygrep-hooks + rev: v1.10.0 + hooks: + - id: python-check-blanket-noqa + - id: python-no-eval + - id: python-no-log-warn + - id: python-use-type-annotations + - id: rst-inline-touching-normal + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.5.0 + hooks: + - id: ruff + args: [ --fix ] + # - id: ruff-format + - repo: https://github.com/pycqa/flake8 + rev: 7.1.0 + hooks: + - id: flake8 + additional_dependencies: [ 'flake8-rst-docstrings' ] + args: [ '--config=.flake8' ] + - repo: https://github.com/adrienverge/yamllint.git + rev: v1.35.1 + hooks: + - id: yamllint + args: [ '--config-file=.yamllint.yaml' ] + - repo: https://github.com/python-jsonschema/check-jsonschema + rev: 0.28.6 + hooks: + - id: check-github-workflows + - id: check-readthedocs + - repo: meta + hooks: + - id: check-hooks-apply + - id: check-useless-excludes + + +ci: + autofix_commit_msg: | + [pre-commit.ci] auto fixes from pre-commit.com hooks + + for more information, see https://pre-commit.ci + autofix_prs: true + autoupdate_branch: '' + autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate' + autoupdate_schedule: quarterly + skip: [] + submodules: false diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 9c6d3d1..ec52547 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -44,8 +44,7 @@ If 
you are proposing a feature:

 * Explain in detail how it would work.
 * Keep the scope as narrow as possible, to make it easier to implement.
-* Remember that this is a volunteer-driven project, and that contributions
-  are welcome. :)
+* Remember that this is a volunteer-driven project, and that contributions are welcome. :)

 Get Started!
 ------------
@@ -69,12 +68,20 @@ Get Started!

 Ready to contribute? Here's how to set up ``xncml`` for local development.

-#. Fork the ``xncml`` repo on GitHub.
+#. First, clone the ``xncml`` repo locally.
 #. Clone your fork locally:

+    * If you are not an ``xncml`` collaborator, first fork the ``xncml`` repo on GitHub, then clone your fork locally.
+
+      .. code-block:: console
+
+          git clone git@github.com:your_name_here/xncml.git
+
+    * If you are an ``xncml`` collaborator, clone the ``xncml`` repo directly.
+
+      .. code-block:: console

-    git clone git@github.com:your_name_here/xncml.git
+          git clone git@github.com:bzah/xncml.git

 #. Install your local copy into a development environment. You can create a new Anaconda development environment with:

@@ -85,17 +92,24 @@ Ready to contribute? Here's how to set up ``xncml`` for local development.
     cd xncml/
     make dev

-    This installs ``xncml`` in an "editable" state, meaning that changes to the code are immediately seen by the environment. To ensure a consistent coding style, `make dev` also installs the ``pre-commit`` hooks to your local clone.
+    If you are on Windows, replace the ``make dev`` command with the following:
+
+    .. code-block:: console
+
+        python -m pip install -e .[dev]
+        pre-commit install
+
+    This installs ``xncml`` in an "editable" state, meaning that changes to the code are immediately seen by the environment. To ensure a consistent coding style, `make dev` also installs the ``pre-commit`` hooks to your local clone.

-    On commit, ``pre-commit`` will check that ``black``, ``blackdoc``, ``isort``, ``flake8``, and ``ruff`` checks are passing, perform automatic fixes if possible, and warn of violations that require intervention. If your commit fails the checks initially, simply fix the errors, re-add the files, and re-commit.
+    On commit, ``pre-commit`` will check that the ``flake8`` and ``ruff`` checks are passing, perform automatic fixes if possible, and warn of violations that require intervention. If your commit fails the checks initially, simply fix the errors, re-add the files, and re-commit.

-    You can also run the hooks manually with:
+    You can also run the hooks manually with:

     .. code-block:: console

         pre-commit run -a

-    If you want to skip the ``pre-commit`` hooks temporarily, you can pass the ``--no-verify`` flag to `git commit`.
+    If you want to skip the ``pre-commit`` hooks temporarily, you can pass the `--no-verify` flag to `git commit`.

 #. Create a branch for local development:

     .. code-block:: console

         git checkout -b name-of-your-bugfix-or-feature

-    Now you can make your changes locally.
+    Now you can make your changes locally.

 #. When you're done making changes, we **strongly** suggest running the tests in your environment or with the help of ``tox``:

@@ -121,7 +135,7 @@ Ready to contribute? Here's how to set up ``xncml`` for local development.
     git commit -m "Your detailed description of your changes."
     git push origin name-of-your-bugfix-or-feature

-    If ``pre-commit`` hooks fail, try re-committing your changes (or, if need be, you can skip them with `git commit --no-verify`).
+    If ``pre-commit`` hooks fail, try fixing the issues, re-staging the files to be committed, and re-committing your changes (or, if need be, you can skip them with `git commit --no-verify`).

 #. Submit a `Pull Request <https://github.com/xarray-contrib/xncml/pulls>`_ through the GitHub website.

@@ -137,24 +151,44 @@ Ready to contribute? Here's how to set up ``xncml`` for local development.

     # To simply test that the docs pass build checks
     python -m tox -e docs

+#. If changes to your branch are made on GitHub, you can update your local branch with:
+
+    .. code-block:: console
+
+        git checkout name-of-your-bugfix-or-feature
+        git fetch
+        git pull origin name-of-your-bugfix-or-feature
+
+    If you have merge conflicts, you might need to replace `git pull` with `git merge` and resolve the conflicts manually.
+    Resolving conflicts from the command line can be tricky. If you are not comfortable with this, you can ignore the last command and instead use a GUI like PyCharm or Visual Studio Code to merge the remote changes and resolve the conflicts.
+
+#. Before merging, your Pull Request will need to be based on the `main` branch of the ``xncml`` repository. If your branch is not up-to-date with the `main` branch, you can perform similar steps as above to update your branch:
+
+    .. code-block:: console
+
+        git checkout name-of-your-bugfix-or-feature
+        git fetch
+        git pull origin main
+
+    See the previous step for more information on resolving conflicts.
+
 #. Once your Pull Request has been accepted and merged to the `main` branch, several automated workflows will be triggered:

    - The ``bump-version.yml`` workflow will automatically bump the patch version when pull requests are pushed to the `main` branch on GitHub. **It is not recommended to manually bump the version in your branch when merging (non-release) pull requests (this will cause the version to be bumped twice).**
    - `ReadTheDocs` will automatically build the documentation and publish it to the `latest` branch of `xncml` documentation website.
    - If your branch is not a fork (ie: you are a maintainer), your branch will be automatically deleted.

-    You will have contributed your first changes to ``xncml``!
+    You will have contributed to ``xncml``!

 Pull Request Guidelines
 -----------------------

 Before you submit a pull request, check that it meets these guidelines:

-#. The pull request should include tests and should aim to provide `code coverage `_ for all new lines of code. You can use the `--cov-report html --cov xncml` flags during the call to ``pytest`` to generate an HTML report and analyse the current test coverage.
+#. All functions should be documented with `docstrings` following the `numpydoc <https://numpydoc.readthedocs.io/en/latest/format.html>`_ format.

-#. If the pull request adds functionality, the docs should also be updated. Put your new functionality into a function with a docstring, and add the feature to the list in ``README.rst``.
+#. If the pull request adds functionality, either update the documentation or create a new notebook that demonstrates the feature. Library-defining features should also be listed in ``README.rst``.

-#. The pull request should work for Python 3.9, 3.10, 3.11, and 3.12. Check that the tests pass for all supported Python versions.
+#. The pull request should work for all currently supported Python versions. Check the `pyproject.toml` or `tox.ini` files for the list of supported versions.

 Tips
 ----
@@ -177,13 +211,10 @@ To run specific code style checks:

 .. code-block:: console

-    python -m black --check src/xncml tests
-    python -m isort --check src/xncml tests
-    python -m blackdoc --check src/xncml docs
     python -m ruff check src/xncml tests
     python -m flake8 src/xncml tests

-To get ``black``, ``isort``, ``blackdoc``, ``ruff``, and ``flake8`` (with plugins ``flake8-alphabetize`` and ``flake8-rst-docstrings``) simply install them with ``pip`` (or ``conda``) into your environment.
+To get ``ruff`` and ``flake8`` (with the ``flake8-rst-docstrings`` plugin), simply install them with ``pip`` into your environment.

 Code of Conduct
 ---------------
diff --git a/docs/conf.py b/docs/conf.py
index a496e44..1ce2ae3 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -19,6 +19,7 @@
 # import os
 import sys
+
 sys.path.insert(0, os.path.abspath('..'))

 import xncml
diff --git a/docs/index.rst b/docs/index.rst
index 88a9dd6..6796378 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -14,22 +14,16 @@ Xncml Documentation
    authors
    changelog

-`xncml` adds NcML support to xarray. It includes utilities to modify NcML documents,
-and open NcML files as `xarray.Dataset`.
-For more information on NcML, take a look at
-[tutorials and examples](https://docs.unidata.ucar.edu/netcdf-java/current/userguide/basic_ncml_tutorial.html)
-and the [annotated schema](https://docs.unidata.ucar.edu/netcdf-java/current/userguide/annotated_ncml_schema.html).
+`xncml` adds NcML support to xarray. It includes utilities to modify NcML documents, and open NcML files as `xarray.Dataset`.
+For more information on NcML, take a look at `tutorials and examples <https://docs.unidata.ucar.edu/netcdf-java/current/userguide/basic_ncml_tutorial.html>`_
+and the `annotated schema <https://docs.unidata.ucar.edu/netcdf-java/current/userguide/annotated_ncml_schema.html>`_.

-
-Note the existence of a similar [project](https://github.com/ioos/ncml)
-to edit NcML documents, now archived.
+Note the existence of a similar `project <https://github.com/ioos/ncml>`_ to edit NcML documents, now archived.

 Feedback
 ========

-If you encounter any errors or problems with **xncml**,
-please open an Issue at the GitHub main repository.
-
+If you encounter any errors or problems with **xncml**, please open an Issue at the `GitHub main repository <https://github.com/xarray-contrib/xncml>`_.

 Indices and tables
 ==================
diff --git a/docs/releasing.rst b/docs/releasing.rst
index 1365609..e5da320 100644
--- a/docs/releasing.rst
+++ b/docs/releasing.rst
@@ -61,7 +61,7 @@ From the command line on your Linux distribution, simply run the following from
 # To upload to PyPI
 make release

-The new version based off of the version checked out will now be available via `pip` (`pip install {{ cookiecutter.project_slug }}`).
+The new version based off of the version checked out will now be available via `pip` (`pip install xncml`).

 Releasing on conda-forge
 ~~~~~~~~~~~~~~~~~~~~~~~~
@@ -84,7 +84,7 @@ For more information on `grayskull`, please see the following link: https://gith
 Before updating the main conda-forge recipe, we echo the conda-forge documentation and *strongly* suggest performing the following checks:

 * Ensure that dependencies and dependency versions correspond with those of the tagged version, with open or pinned versions for the `host` requirements.
- * If possible, configure tests within the conda-forge build CI (e.g. `imports: {{ cookiecutter.project_slug }}`, `commands: pytest {{ cookiecutter.project_slug }}`).
+ * If possible, configure tests within the conda-forge build CI (e.g. `imports: xncml`, `commands: pytest xncml`); see the sketch below.
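To make that test-related bullet concrete, the ``test`` section of a conda recipe (``meta.yaml``) could look roughly like the sketch below. The field names follow the general conda-build schema; the actual xncml feedstock recipe is generated by ``grayskull`` and may differ:

.. code-block:: yaml

    test:
      imports:
        - xncml                # smoke-test that the package imports cleanly
      requires:
        - pip
      commands:
        - python -m pip check  # verify installed dependency metadata is consistent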
Subsequent releases ^^^^^^^^^^^^^^^^^^^ diff --git a/pyproject.toml b/pyproject.toml index e5fa370..1fa22e8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,20 +47,19 @@ dependencies = [ [project.optional-dependencies] dev = [ # Dev tools and testing - "pip >=23.3.0", - "bump-my-version >=0.18.3", + "pip >=24.0", + "bump-my-version >=0.23.0", "watchdog >=3.0.0", - "flake8 >=6.1.0", - "flake8-alphabetize >=0.0.21", + "flake8 >=7.0.0", "flake8-rst-docstrings >=0.3.0", "flit >=3.9.0", - "tox >=4.5.1", - "coverage >=7.0.0", + "tox >=4.15.1", + "coverage >=7.5.0", "coveralls >=3.3.1", "mypy", "pytest >=7.3.1", "pytest-cov >=4.0.0", - "ruff >=0.3.0", + "ruff >=0.4.0", "pre-commit >=3.3.2" ] docs = [ @@ -96,6 +95,33 @@ serialize = [ "{major}.{minor}.{patch}" ] +[[tool.bumpversion.files]] +filename = "CHANGELOG.rst" +include_bumps = ["release"] +search = """\ +`Unreleased `_ (latest) +------------------------------------------------------ +""" +replace = """\ +`Unreleased `_ (latest) +------------------------------------------------------ + +Contributors: + +Changes +^^^^^^^ +* No change. + +Fixes +^^^^^ +* No change. + +.. _changes_{new_version}: + +`v{new_version} `_ +---------------------------------------------------- +""" + [[tool.bumpversion.files]] filename = "src/xncml/__init__.py" search = "__version__ = \"{current_version}\"" @@ -208,29 +234,35 @@ docstring-code-format = true line-ending = "auto" [tool.ruff.lint] +extend-select = [ + "RUF022" # unsorted-dunder-all +] ignore = [ - "D205", - "D400", - "D401" + "COM", # commas + "D205", # blank-line-after-summary + "D400", # ends-in-period + "D401" # non-imperative-mood ] select = [ - "C9", - "D", - "E", - "F", - "W" + "BLE", # blind-except + "C90", # mccabe-complexity + "D", # docstrings + "E", # pycodestyle errors + "FLY002", # static-join-to-fstring + "G", # logging-format + "N", # naming conventions + "PERF", # iterator performance + "PTH", # pathlib + "RUF010", # explicit-f-string-type-conversion + "RUF013", # implicit-optional + "S", # bandit + "UP", # python version conventions + "W" # pycodestyle warnings ] [tool.ruff.lint.flake8-bandit] check-typed-exception = true -[tool.ruff.lint.isort] -known-first-party = ["xncml"] -case-sensitive = true -detect-same-package = false -lines-after-imports = 1 -no-lines-before = ["future", "standard-library"] - [tool.ruff.lint.mccabe] max-complexity = 15 @@ -238,8 +270,8 @@ max-complexity = 15 "docs/**" = ["E402"] "src/xncml/**/__init__.py" = ["F401", "F403"] "src/xncml/generated/*.py" = ["D"] -"tests/**/*.py" = ["D"] "src/xncml/parser.py" = ["C901"] # To be added +"tests/**/*.py" = ["D", "S101"] [tool.ruff.lint.pycodestyle] max-doc-length = 180 diff --git a/src/xncml/__init__.py b/src/xncml/__init__.py index 955d40f..64f99aa 100644 --- a/src/xncml/__init__.py +++ b/src/xncml/__init__.py @@ -1,5 +1,22 @@ -#!/usr/bin/env python -"""Top-level module for xncml.""" +"""Tools for manipulating NcML (NetCDF Markup Language) files with/for xarray""" + +################################################################################### +# Apache Software License 2.0 +# +# Copyright (c) 2019-2024, Anderson Banihirwe, David Huard +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################### from .core import Dataset from .parser import open_ncml diff --git a/tox.ini b/tox.ini index 4e828fb..efd8cf9 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -min_version = 4.0 +min_version = 4.15.1 envlist = lint py{39,310,311,312} @@ -7,15 +7,16 @@ envlist = coveralls requires = flit >= 3.9.0 - pip >= 23.3.0 + pip >= 24.0 opts = --verbose [testenv:lint] skip_install = True deps = - flake8 - ruff >=0.3.0 + flake8 >=7.0.0 + flake8-rst-docstrings >=0.3.0 + ruff >=0.4.0 commands = make lint allowlist_externals = From ded56815af4e5ae75db3c6345623e5bbffcb4637 Mon Sep 17 00:00:00 2001 From: Trevor James Smith <10819524+Zeitsperre@users.noreply.github.com> Date: Fri, 5 Jul 2024 11:34:40 -0400 Subject: [PATCH 21/26] disable pre-commit ci for now --- .pre-commit-config.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 43c16f5..7cf5ba1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -56,14 +56,14 @@ repos: - id: check-useless-excludes -ci: - autofix_commit_msg: | - [pre-commit.ci] auto fixes from pre-commit.com hooks +# ci: +# autofix_commit_msg: | +# [pre-commit.ci] auto fixes from pre-commit.com hooks - for more information, see https://pre-commit.ci - autofix_prs: true - autoupdate_branch: '' - autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate' - autoupdate_schedule: quarterly - skip: [] - submodules: false +# for more information, see https://pre-commit.ci +# autofix_prs: true +# autoupdate_branch: '' +# autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate' +# autoupdate_schedule: quarterly +# skip: [] +# submodules: false From ef1ead437e4cc4d41cdf83739af1cfecf28b131f Mon Sep 17 00:00:00 2001 From: Trevor James Smith <10819524+Zeitsperre@users.noreply.github.com> Date: Fri, 5 Jul 2024 11:41:18 -0400 Subject: [PATCH 22/26] do not run pyupgrade yet --- .pre-commit-config.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7cf5ba1..aaf2539 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,11 +2,11 @@ default_language_version: python: python3 repos: - - repo: https://github.com/asottile/pyupgrade - rev: v3.16.0 - hooks: - - id: pyupgrade - args: [ '--py39-plus' ] + # - repo: https://github.com/asottile/pyupgrade + # rev: v3.16.0 + # hooks: + # - id: pyupgrade + # args: [ '--py39-plus' ] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.6.0 hooks: From 585e6e11b7eb3e4be8682c4f1b21172e58b7b56b Mon Sep 17 00:00:00 2001 From: Trevor James Smith <10819524+Zeitsperre@users.noreply.github.com> Date: Fri, 5 Jul 2024 11:49:57 -0400 Subject: [PATCH 23/26] run safe fixes, set ignored ruff violations --- pyproject.toml | 14 +++++++++++++- src/xncml/core.py | 2 +- src/xncml/generated/ncml_2_2.py | 10 +++++----- tests/test_parser.py | 2 +- 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 
1fa22e8..404d05e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -241,7 +241,19 @@ ignore = [ "COM", # commas "D205", # blank-line-after-summary "D400", # ends-in-period - "D401" # non-imperative-mood + "D401", # non-imperative-mood + # The following ignore codes are disabled because the codebase is not yet compliant + "BLE001", # Do not catch blind exception: `Exception` + "PERF203", # `try`-`except` within a loop incurs performance overhead + "PTH100", # `os.path.abspath()` should be replaced by `Path.resolve()` + "PTH107", # `os.remove()` should be replaced by `Path.unlink()` + "PTH110", # ``os.path.exists()` should be replaced by `Path.exists()` + "PTH120", # `os.path.dirname()` should be replaced by `Path.parent` + "PTH123", # `open()` should be replaced by `Path.open()` + "RUF013", # PEP 484 prohibits implicit `Optional` + "S110", # `try`-`except`-`pass` detected, consider logging the exception + "UP006", # Use `list` instead of `List` for type annotation + "UP007" # Use `X | Y` for type annotations ] select = [ "BLE", # blind-except diff --git a/src/xncml/core.py b/src/xncml/core.py index 6bc8c0c..2f742e8 100644 --- a/src/xncml/core.py +++ b/src/xncml/core.py @@ -13,7 +13,7 @@ import xmltodict -class Dataset(object): +class Dataset: """A class for reading and manipulating NcML file. Note that NcML documents are used for two distinct purposes: diff --git a/src/xncml/generated/ncml_2_2.py b/src/xncml/generated/ncml_2_2.py index fc4a182..2e7f5b4 100644 --- a/src/xncml/generated/ncml_2_2.py +++ b/src/xncml/generated/ncml_2_2.py @@ -380,7 +380,7 @@ class Meta: "type": "Element", }, ) - variable: List["Variable"] = field( + variable: List[Variable] = field( default_factory=list, metadata={ "type": "Element", @@ -535,7 +535,7 @@ class Meta: ), }, ) - variable_agg: List["Aggregation.VariableAgg"] = field( + variable_agg: List[Aggregation.VariableAgg] = field( default_factory=list, metadata={ "name": "variableAgg", @@ -556,19 +556,19 @@ class Meta: "type": "Element", }, ) - netcdf: List["Netcdf"] = field( + netcdf: List[Netcdf] = field( default_factory=list, metadata={ "type": "Element", }, ) - scan: List["Aggregation.Scan"] = field( + scan: List[Aggregation.Scan] = field( default_factory=list, metadata={ "type": "Element", }, ) - scan_fmrc: List["Aggregation.ScanFmrc"] = field( + scan_fmrc: List[Aggregation.ScanFmrc] = field( default_factory=list, metadata={ "name": "scanFmrc", diff --git a/tests/test_parser.py b/tests/test_parser.py index 3784e09..ebfcc76 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -16,7 +16,7 @@ data = Path(__file__).parent / "data" -class CheckClose(object): +class CheckClose: """Check that files are closed after the test. Note that `close` has to be explicitly called within the context manager for this to work. 
""" From cef99b0aeef1432bd4a5e1625db69cbdadb7e38f Mon Sep 17 00:00:00 2001 From: Trevor James Smith <10819524+Zeitsperre@users.noreply.github.com> Date: Fri, 5 Jul 2024 12:11:14 -0400 Subject: [PATCH 24/26] fix coverage configuration --- pyproject.toml | 4 ++-- tests/__init__.py | 0 tox.ini | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 tests/__init__.py diff --git a/pyproject.toml b/pyproject.toml index 404d05e..e2fefd3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -148,9 +148,9 @@ values = [ ] [tool.coverage.run] -relative_files = true -include = ["src/xncml/*"] omit = ["tests/*.py"] +relative_files = true +source = ["xncml"] [tool.flit.sdist] include = [ diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/tox.ini b/tox.ini index efd8cf9..e80d88d 100644 --- a/tox.ini +++ b/tox.ini @@ -35,6 +35,7 @@ setenv = PYTEST_ADDOPTS = "--color=yes" PYTHONPATH = {toxinidir} passenv = + COVERALLS_* GITHUB_* extras = dev From f21df8e7024c1ad3b8e3bb7baa7bedbeb07e1a3b Mon Sep 17 00:00:00 2001 From: Trevor James Smith <10819524+Zeitsperre@users.noreply.github.com> Date: Fri, 5 Jul 2024 12:35:45 -0400 Subject: [PATCH 25/26] fix coveralls reporting --- .cruft.json | 2 +- .github/workflows/main.yml | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.cruft.json b/.cruft.json index 131ad09..4fc6997 100644 --- a/.cruft.json +++ b/.cruft.json @@ -1,6 +1,6 @@ { "template": "https://github.com/Ouranosinc/cookiecutter-pypackage.git", - "commit": "3b3598ae9524e7eb495bcba91093fac3369e753a", + "commit": "58525bb6c287ad11c874d7b8531c3597232e2d93", "checkout": null, "context": { "cookiecutter": { diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index b05f32d..870bc20 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -54,13 +54,13 @@ jobs: strategy: matrix: include: - - tox-env: "py39" + - tox-env: "py39-coveralls" python-version: "3.9" - - tox-env: "py310" + - tox-env: "py310-coveralls" python-version: "3.10" - - tox-env: "py311" + - tox-env: "py311-coveralls" python-version: "3.11" - - tox-env: "py312" + - tox-env: "py312-coveralls" python-version: "3.12" steps: - name: Harden Runner From 445b7e8e6351d073894331a32c896cdfd7f34313 Mon Sep 17 00:00:00 2001 From: Trevor James Smith <10819524+Zeitsperre@users.noreply.github.com> Date: Fri, 5 Jul 2024 12:57:24 -0400 Subject: [PATCH 26/26] re-enable pre-commit ci --- .pre-commit-config.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index aaf2539..781ee21 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -56,14 +56,14 @@ repos: - id: check-useless-excludes -# ci: -# autofix_commit_msg: | -# [pre-commit.ci] auto fixes from pre-commit.com hooks +ci: + autofix_commit_msg: | + [pre-commit.ci] auto fixes from pre-commit.com hooks -# for more information, see https://pre-commit.ci -# autofix_prs: true -# autoupdate_branch: '' -# autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate' -# autoupdate_schedule: quarterly -# skip: [] -# submodules: false + for more information, see https://pre-commit.ci + autofix_prs: true + autoupdate_branch: '' + autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate' + autoupdate_schedule: quarterly + skip: [] + submodules: false