summaryrefslogtreecommitdiff
path: root/silx/io
diff options
context:
space:
mode:
Diffstat (limited to 'silx/io')
-rw-r--r--silx/io/dictdump.py125
-rw-r--r--silx/io/nxdata/parse.py111
-rw-r--r--silx/io/octaveh5.py4
-rw-r--r--silx/io/test/test_dictdump.py190
-rwxr-xr-xsilx/io/test/test_fabioh5.py22
-rw-r--r--silx/io/test/test_nxdata.py7
-rw-r--r--silx/io/test/test_spectoh5.py11
-rw-r--r--silx/io/test/test_url.py9
-rw-r--r--silx/io/test/test_utils.py2
-rw-r--r--silx/io/url.py8
-rw-r--r--silx/io/utils.py6
11 files changed, 443 insertions, 52 deletions
diff --git a/silx/io/dictdump.py b/silx/io/dictdump.py
index da1bc5c..f2318e0 100644
--- a/silx/io/dictdump.py
+++ b/silx/io/dictdump.py
@@ -1,6 +1,6 @@
# coding: utf-8
# /*##########################################################################
-# Copyright (C) 2016-2019 European Synchrotron Radiation Facility
+# Copyright (C) 2016-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -154,14 +154,19 @@ def dicttoh5(treedict, h5file, h5path='/',
any other data type, it is cast into a numpy array and written as a
:mod:`h5py` dataset. Dictionary keys must be strings and cannot contain
the ``/`` character.
+
+ If dictionary keys are tuples they are interpreted to set h5 attributes.
+    The tuples should have the format (dataset_name, attr_name).
.. note::
This function requires `h5py <http://www.h5py.org/>`_ to be installed.
- :param treedict: Nested dictionary/tree structure with strings as keys
- and array-like objects as leafs. The ``"/"`` character is not allowed
- in keys.
+    :param treedict: Nested dictionary/tree structure with strings or tuples as
+        keys and array-like objects as leaves. The ``"/"`` character can be used
+        to define sub trees. If tuples are used as keys they should have the
+        format (dataset_name, attr_name) and will add an h5 attribute with the
+        corresponding value.
:param h5file: HDF5 file name or handle. If a file name is provided, the
function opens the file in the specified mode and closes it again
before completing.
@@ -186,10 +191,12 @@ def dicttoh5(treedict, h5file, h5path='/',
"Europe": {
"France": {
"Isère": {
- "Grenoble": "18.44 km2"
+ "Grenoble": 18.44,
+ ("Grenoble","unit"): "km2"
},
"Nord": {
- "Tourcoing": "15.19 km2"
+ "Tourcoing": 15.19,
+ ("Tourcoing","unit"): "km2"
},
},
},
@@ -207,7 +214,11 @@ def dicttoh5(treedict, h5file, h5path='/',
h5path += "/"
with _SafeH5FileWrite(h5file, mode=mode) as h5f:
- for key in treedict:
+ if isinstance(treedict, dict) and h5path != "/":
+ if h5path not in h5f:
+ h5f.create_group(h5path)
+
+ for key in filter(lambda k: not isinstance(k, tuple), treedict):
if isinstance(treedict[key], dict) and len(treedict[key]):
# non-empty group: recurse
dicttoh5(treedict[key], h5f, h5path + key,
@@ -253,6 +264,106 @@ def dicttoh5(treedict, h5file, h5path='/',
data=ds,
**create_dataset_args)
+ # deal with h5 attributes which have tuples as keys in treedict
+ for key in filter(lambda k: isinstance(k, tuple), treedict):
+ if (h5path + key[0]) not in h5f:
+ # Create empty group if key for attr does not exist
+ h5f.create_group(h5path + key[0])
+ logger.warning(
+ "key (%s) does not exist. attr %s "
+                "will be written to an empty group." % (h5path + key[0], key[1])
+ )
+
+ if key[1] in h5f[h5path + key[0]].attrs:
+ if not overwrite_data:
+ logger.warning(
+ "attribute %s@%s already exists. Not overwriting."
+ "" % (h5path + key[0], key[1])
+ )
+ continue
+
+ # Write attribute
+ value = treedict[key]
+
+ # Makes list/tuple of str being encoded as vlen unicode array
+ # Workaround for h5py<2.9.0 (e.g. debian 10).
+ if (isinstance(value, (list, tuple)) and
+ numpy.asarray(value).dtype.type == numpy.unicode_):
+ value = numpy.array(value, dtype=h5py.special_dtype(vlen=str))
+
+ h5f[h5path + key[0]].attrs[key[1]] = value
+
+
+def dicttonx(
+ treedict,
+ h5file,
+ h5path="/",
+ mode="w",
+ overwrite_data=False,
+ create_dataset_args=None,
+):
+ """
+ Write a nested dictionary to a HDF5 file, using string keys as member names.
+    The NeXus convention is used to identify attributes with the ``"@"``
+    character, therefore the dataset names should not contain ``"@"``.
+
+    :param treedict: Nested dictionary/tree structure with strings as keys
+        and array-like objects as leaves. The ``"/"`` character can be used
+        to define sub trees. The ``"@"`` character is used to write attributes.
+
+    Details on all other parameters can be found in the documentation of dicttoh5.
+
+ Example::
+
+ import numpy
+ from silx.io.dictdump import dicttonx
+
+ gauss = {
+ "entry":{
+ "title":u"A plot of a gaussian",
+ "plot": {
+ "y": numpy.array([0.08, 0.19, 0.39, 0.66, 0.9, 1.,
+ 0.9, 0.66, 0.39, 0.19, 0.08]),
+ "x": numpy.arange(0,1.1,.1),
+ "@signal": "y",
+ "@axes": "x",
+ "@NX_class":u"NXdata",
+                    "title": u"Gauss Plot",
+ },
+ "@NX_class":u"NXentry",
+ "default":"plot",
+            },
+ "@NX_class": u"NXroot",
+ "@default": "entry",
+ }
+
+ dicttonx(gauss,"test.h5")
+ """
+
+ def copy_keys_keep_values(original):
+        # create a new treedict with modified keys but keep the values
+ copy = dict()
+ for key, value in original.items():
+ if "@" in key:
+ newkey = tuple(key.rsplit("@", 1))
+ else:
+ newkey = key
+ if isinstance(value, dict):
+ copy[newkey] = copy_keys_keep_values(value)
+ else:
+ copy[newkey] = value
+ return copy
+
+ nxtreedict = copy_keys_keep_values(treedict)
+ dicttoh5(
+ nxtreedict,
+ h5file,
+ h5path=h5path,
+ mode=mode,
+ overwrite_data=overwrite_data,
+ create_dataset_args=create_dataset_args,
+ )
+
def _name_contains_string_in_list(name, strlist):
if strlist is None:
diff --git a/silx/io/nxdata/parse.py b/silx/io/nxdata/parse.py
index cce47ab..6bd18d6 100644
--- a/silx/io/nxdata/parse.py
+++ b/silx/io/nxdata/parse.py
@@ -1,7 +1,7 @@
# coding: utf-8
# /*##########################################################################
#
-# Copyright (c) 2017-2019 European Synchrotron Radiation Facility
+# Copyright (c) 2017-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -41,6 +41,7 @@ Other public functions:
"""
+import json
import numpy
import six
@@ -53,13 +54,83 @@ from ._utils import get_attr_as_unicode, INTERPDIM, nxdata_logger, \
__authors__ = ["P. Knobel"]
__license__ = "MIT"
-__date__ = "15/02/2019"
+__date__ = "24/03/2020"
class InvalidNXdataError(Exception):
pass
+class _SilxStyle(object):
+ """NXdata@SILX_style parser.
+
+ :param NXdata nxdata:
+ NXdata description for which to extract silx_style information.
+ """
+
+ def __init__(self, nxdata):
+ naxes = len(nxdata.axes)
+ self._axes_scale_types = [None] * naxes
+ self._signal_scale_type = None
+
+ stylestr = get_attr_as_unicode(nxdata.group, "SILX_style")
+ if stylestr is None:
+ return
+
+ try:
+ style = json.loads(stylestr)
+ except json.JSONDecodeError:
+ nxdata_logger.error(
+ "Ignoring SILX_style, cannot parse: %s", stylestr)
+ return
+
+ if not isinstance(style, dict):
+ nxdata_logger.error(
+ "Ignoring SILX_style, cannot parse: %s", stylestr)
+
+ if 'axes_scale_types' in style:
+ axes_scale_types = style['axes_scale_types']
+
+ if isinstance(axes_scale_types, str):
+ # Convert single argument to list
+ axes_scale_types = [axes_scale_types]
+
+ if not isinstance(axes_scale_types, list):
+ nxdata_logger.error(
+ "Ignoring SILX_style:axes_scale_types, not a list")
+ else:
+ for scale_type in axes_scale_types:
+ if scale_type not in ('linear', 'log'):
+ nxdata_logger.error(
+ "Ignoring SILX_style:axes_scale_types, invalid value: %s", str(scale_type))
+ break
+ else: # All values are valid
+ if len(axes_scale_types) > naxes:
+ nxdata_logger.error(
+ "Clipping SILX_style:axes_scale_types, too many values")
+ axes_scale_types = axes_scale_types[:naxes]
+ elif len(axes_scale_types) < naxes:
+ # Extend axes_scale_types with None to match number of axes
+ axes_scale_types = [None] * (naxes - len(axes_scale_types)) + axes_scale_types
+ self._axes_scale_types = tuple(axes_scale_types)
+
+ if 'signal_scale_type' in style:
+ scale_type = style['signal_scale_type']
+ if scale_type not in ('linear', 'log'):
+ nxdata_logger.error(
+ "Ignoring SILX_style:signal_scale_type, invalid value: %s", str(scale_type))
+ else:
+ self._signal_scale_type = scale_type
+
+ axes_scale_types = property(
+ lambda self: self._axes_scale_types,
+ doc="Tuple of NXdata axes scale types (None, 'linear' or 'log'). List[str]")
+
+ signal_scale_type = property(
+ lambda self: self._signal_scale_type,
+ doc="NXdata signal scale type (None, 'linear' or 'log'). str")
+
+
class NXdata(object):
"""NXdata parser.
@@ -76,6 +147,7 @@ class NXdata(object):
"""
def __init__(self, group, validate=True):
super(NXdata, self).__init__()
+ self._plot_style = None
self.group = group
"""h5py-like group object with @NX_class=NXdata.
@@ -147,6 +219,8 @@ class NXdata(object):
# excludes scatters
self.signal_is_1d = self.signal_is_1d and len(self.axes) <= 1 # excludes n-D scatters
+ self._plot_style = _SilxStyle(self)
+
def _validate(self):
"""Fill :attr:`issues` with error messages for each error found."""
if not is_group(self.group):
@@ -250,8 +324,18 @@ class NXdata(object):
"dimensions as axis '%s'." % axis_name)
# test dimensions of errors associated with signal
+
+ signal_errors = signal_name + "_errors"
if "errors" in self.group and is_dataset(self.group["errors"]):
- if self.group["errors"].shape != self.group[signal_name].shape:
+ errors = "errors"
+ elif signal_errors in self.group and is_dataset(self.group[signal_errors]):
+ errors = signal_errors
+ else:
+ errors = None
+ if errors:
+ if self.group[errors].shape != self.group[signal_name].shape:
+ # In principle just the same size should be enough but
+ # NeXus documentation imposes to have the same shape
self.issues.append(
"Dataset containing standard deviations must " +
"have the same dimensions as the signal.")
@@ -629,9 +713,26 @@ class NXdata(object):
if not self.is_valid:
raise InvalidNXdataError("Unable to parse invalid NXdata")
- if "errors" not in self.group:
+ # case of signal
+ signal_errors = self.signal_dataset_name + "_errors"
+ if "errors" in self.group and is_dataset(self.group["errors"]):
+ errors = "errors"
+ elif signal_errors in self.group and is_dataset(self.group[signal_errors]):
+ errors = signal_errors
+ else:
return None
- return self.group["errors"]
+ return self.group[errors]
+
+ @property
+ def plot_style(self):
+ """Information extracted from the optional SILX_style attribute
+
+ :raises: InvalidNXdataError
+ """
+ if not self.is_valid:
+ raise InvalidNXdataError("Unable to parse invalid NXdata")
+
+ return self._plot_style
@property
def is_scatter(self):
diff --git a/silx/io/octaveh5.py b/silx/io/octaveh5.py
index 04e3890..84fa726 100644
--- a/silx/io/octaveh5.py
+++ b/silx/io/octaveh5.py
@@ -1,6 +1,6 @@
# coding: utf-8
# /*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
+# Copyright (C) 2016-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -44,7 +44,7 @@ Here is an example of a simple read and write :
strucDict = reader.get('mt_struct_name')
.. note:: These functions depend on the `h5py <http://www.h5py.org/>`_
- library, which is not a mandatory dependency for `silx`.
+ library, which is a mandatory dependency for `silx`.
"""
diff --git a/silx/io/test/test_dictdump.py b/silx/io/test/test_dictdump.py
index 12e13f5..c0b6914 100644
--- a/silx/io/test/test_dictdump.py
+++ b/silx/io/test/test_dictdump.py
@@ -1,6 +1,6 @@
# coding: utf-8
# /*##########################################################################
-# Copyright (C) 2016-2019 European Synchrotron Radiation Facility
+# Copyright (C) 2016-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -39,6 +39,7 @@ from collections import defaultdict
from silx.utils.testutils import TestLogging
from ..configdict import ConfigDict
+from .. import dictdump
from ..dictdump import dicttoh5, dicttojson, dump
from ..dictdump import h5todict, load
from ..dictdump import logger as dictdump_logger
@@ -64,7 +65,8 @@ class TestDictToH5(unittest.TestCase):
self.h5_fname = os.path.join(self.tempdir, "cityattrs.h5")
def tearDown(self):
- os.unlink(self.h5_fname)
+ if os.path.exists(self.h5_fname):
+ os.unlink(self.h5_fname)
os.rmdir(self.tempdir)
def testH5CityAttrs(self):
@@ -73,7 +75,7 @@ class TestDictToH5(unittest.TestCase):
dicttoh5(city_attrs, self.h5_fname, h5path='/city attributes',
mode="w", create_dataset_args=filters)
- h5f = h5py.File(self.h5_fname)
+ h5f = h5py.File(self.h5_fname, mode='r')
self.assertIn("Tourcoing/area", h5f["/city attributes/Europe/France"])
ds = h5f["/city attributes/Europe/France/Grenoble/inhabitants"]
@@ -110,6 +112,174 @@ class TestDictToH5(unittest.TestCase):
res = h5todict(self.h5_fname)
assert(res['t'] == False)
+ def testAttributes(self):
+ """Any kind of attribute can be described"""
+ ddict = {
+ "group": {"datatset": "hmmm", ("", "group_attr"): 10},
+ "dataset": "aaaaaaaaaaaaaaa",
+ ("", "root_attr"): 11,
+ ("dataset", "dataset_attr"): 12,
+ ("group", "group_attr2"): 13,
+ }
+ with h5py.File(self.h5_fname, "w") as h5file:
+ dictdump.dicttoh5(ddict, h5file)
+ self.assertEqual(h5file["group"].attrs['group_attr'], 10)
+ self.assertEqual(h5file.attrs['root_attr'], 11)
+ self.assertEqual(h5file["dataset"].attrs['dataset_attr'], 12)
+ self.assertEqual(h5file["group"].attrs['group_attr2'], 13)
+
+ def testPathAttributes(self):
+ """A group is requested at a path"""
+ ddict = {
+ ("", "NX_class"): 'NXcollection',
+ }
+ with h5py.File(self.h5_fname, "w") as h5file:
+ # This should not warn
+ with TestLogging(dictdump_logger, warning=0):
+ dictdump.dicttoh5(ddict, h5file, h5path="foo/bar")
+
+ def testKeyOrder(self):
+ ddict1 = {
+ "d": "plow",
+ ("d", "a"): "ox",
+ }
+ ddict2 = {
+ ("d", "a"): "ox",
+ "d": "plow",
+ }
+ with h5py.File(self.h5_fname, "w") as h5file:
+ dictdump.dicttoh5(ddict1, h5file, h5path="g1")
+ dictdump.dicttoh5(ddict2, h5file, h5path="g2")
+ self.assertEqual(h5file["g1/d"].attrs['a'], "ox")
+ self.assertEqual(h5file["g2/d"].attrs['a'], "ox")
+
+ def testAttributeValues(self):
+ """Any NX data types can be used"""
+ ddict = {
+ ("", "bool"): True,
+ ("", "int"): 11,
+ ("", "float"): 1.1,
+ ("", "str"): "a",
+ ("", "boollist"): [True, False, True],
+ ("", "intlist"): [11, 22, 33],
+ ("", "floatlist"): [1.1, 2.2, 3.3],
+ ("", "strlist"): ["a", "bb", "ccc"],
+ }
+ with h5py.File(self.h5_fname, "w") as h5file:
+ dictdump.dicttoh5(ddict, h5file)
+ for k, expected in ddict.items():
+ result = h5file.attrs[k[1]]
+ if isinstance(expected, list):
+ if isinstance(expected[0], str):
+ numpy.testing.assert_array_equal(result, expected)
+ else:
+ numpy.testing.assert_array_almost_equal(result, expected)
+ else:
+ self.assertEqual(result, expected)
+
+ def testAttributeAlreadyExists(self):
+ """A duplicated attribute warns if overwriting is not enabled"""
+ ddict = {
+ "group": {"dataset": "hmmm", ("", "attr"): 10},
+ ("group", "attr"): 10,
+ }
+ with h5py.File(self.h5_fname, "w") as h5file:
+ with TestLogging(dictdump_logger, warning=1):
+ dictdump.dicttoh5(ddict, h5file)
+ self.assertEqual(h5file["group"].attrs['attr'], 10)
+
+ def testFlatDict(self):
+ """Description of a tree with a single level of keys"""
+ ddict = {
+ "group/group/dataset": 10,
+ ("group/group/dataset", "attr"): 11,
+ ("group/group", "attr"): 12,
+ }
+ with h5py.File(self.h5_fname, "w") as h5file:
+ dictdump.dicttoh5(ddict, h5file)
+ self.assertEqual(h5file["group/group/dataset"][()], 10)
+ self.assertEqual(h5file["group/group/dataset"].attrs['attr'], 11)
+ self.assertEqual(h5file["group/group"].attrs['attr'], 12)
+
+
+class TestDictToNx(unittest.TestCase):
+ def setUp(self):
+ self.tempdir = tempfile.mkdtemp()
+ self.h5_fname = os.path.join(self.tempdir, "nx.h5")
+
+ def tearDown(self):
+ if os.path.exists(self.h5_fname):
+ os.unlink(self.h5_fname)
+ os.rmdir(self.tempdir)
+
+ def testAttributes(self):
+ """Any kind of attribute can be described"""
+ ddict = {
+ "group": {"datatset": "hmmm", "@group_attr": 10},
+ "dataset": "aaaaaaaaaaaaaaa",
+ "@root_attr": 11,
+ "dataset@dataset_attr": 12,
+ "group@group_attr2": 13,
+ }
+ with h5py.File(self.h5_fname, "w") as h5file:
+ dictdump.dicttonx(ddict, h5file)
+ self.assertEqual(h5file["group"].attrs['group_attr'], 10)
+ self.assertEqual(h5file.attrs['root_attr'], 11)
+ self.assertEqual(h5file["dataset"].attrs['dataset_attr'], 12)
+ self.assertEqual(h5file["group"].attrs['group_attr2'], 13)
+
+ def testKeyOrder(self):
+ ddict1 = {
+ "d": "plow",
+ "d@a": "ox",
+ }
+ ddict2 = {
+ "d@a": "ox",
+ "d": "plow",
+ }
+ with h5py.File(self.h5_fname, "w") as h5file:
+ dictdump.dicttonx(ddict1, h5file, h5path="g1")
+ dictdump.dicttonx(ddict2, h5file, h5path="g2")
+ self.assertEqual(h5file["g1/d"].attrs['a'], "ox")
+ self.assertEqual(h5file["g2/d"].attrs['a'], "ox")
+
+ def testAttributeValues(self):
+ """Any NX data types can be used"""
+ ddict = {
+ "@bool": True,
+ "@int": 11,
+ "@float": 1.1,
+ "@str": "a",
+ "@boollist": [True, False, True],
+ "@intlist": [11, 22, 33],
+ "@floatlist": [1.1, 2.2, 3.3],
+ "@strlist": ["a", "bb", "ccc"],
+ }
+ with h5py.File(self.h5_fname, "w") as h5file:
+ dictdump.dicttonx(ddict, h5file)
+ for k, expected in ddict.items():
+ result = h5file.attrs[k[1:]]
+ if isinstance(expected, list):
+ if isinstance(expected[0], str):
+ numpy.testing.assert_array_equal(result, expected)
+ else:
+ numpy.testing.assert_array_almost_equal(result, expected)
+ else:
+ self.assertEqual(result, expected)
+
+ def testFlatDict(self):
+ """Description of a tree with a single level of keys"""
+ ddict = {
+ "group/group/dataset": 10,
+ "group/group/dataset@attr": 11,
+ "group/group@attr": 12,
+ }
+ with h5py.File(self.h5_fname, "w") as h5file:
+ dictdump.dicttonx(ddict, h5file)
+ self.assertEqual(h5file["group/group/dataset"][()], 10)
+ self.assertEqual(h5file["group/group/dataset"].attrs['attr'], 11)
+ self.assertEqual(h5file["group/group"].attrs['attr'], 12)
+
class TestH5ToDict(unittest.TestCase):
def setUp(self):
@@ -260,14 +430,12 @@ class TestDictToIni(unittest.TestCase):
def suite():
test_suite = unittest.TestSuite()
- test_suite.addTest(
- unittest.defaultTestLoader.loadTestsFromTestCase(TestDictToIni))
- test_suite.addTest(
- unittest.defaultTestLoader.loadTestsFromTestCase(TestDictToH5))
- test_suite.addTest(
- unittest.defaultTestLoader.loadTestsFromTestCase(TestDictToJson))
- test_suite.addTest(
- unittest.defaultTestLoader.loadTestsFromTestCase(TestH5ToDict))
+ loadTests = unittest.defaultTestLoader.loadTestsFromTestCase
+ test_suite.addTest(loadTests(TestDictToIni))
+ test_suite.addTest(loadTests(TestDictToH5))
+ test_suite.addTest(loadTests(TestDictToNx))
+ test_suite.addTest(loadTests(TestDictToJson))
+ test_suite.addTest(loadTests(TestH5ToDict))
return test_suite
diff --git a/silx/io/test/test_fabioh5.py b/silx/io/test/test_fabioh5.py
index 7b4abbc..f2c85b1 100755
--- a/silx/io/test/test_fabioh5.py
+++ b/silx/io/test/test_fabioh5.py
@@ -104,7 +104,7 @@ class TestFabioH5(unittest.TestCase):
data = numpy.arange(2 * 3)
data.shape = 2, 3
fabio_image = fabio.edfimage.edfimage(data=data)
- fabio_image.appendFrame(data=data)
+ fabio_image.append_frame(data=data)
h5_image = fabioh5.File(fabio_image=fabio_image)
dataset = h5_image["/scan_0/instrument/detector_0/data"]
@@ -124,8 +124,8 @@ class TestFabioH5(unittest.TestCase):
data3 = numpy.arange(2 * 5 * 1)
data3.shape = 2, 5, 1
fabio_image = fabio.edfimage.edfimage(data=data1)
- fabio_image.appendFrame(data=data2)
- fabio_image.appendFrame(data=data3)
+ fabio_image.append_frame(data=data2)
+ fabio_image.append_frame(data=data3)
h5_image = fabioh5.File(fabio_image=fabio_image)
dataset = h5_image["/scan_0/instrument/detector_0/data"]
@@ -207,7 +207,7 @@ class TestFabioH5(unittest.TestCase):
if fabio_image is None:
fabio_image = fabio.edfimage.EdfImage(data=data, header=header)
else:
- fabio_image.appendFrame(data=data, header=header)
+ fabio_image.append_frame(data=data, header=header)
h5_image = fabioh5.File(fabio_image=fabio_image)
data = h5_image["/scan_0/instrument/detector_0/others/float_item"]
# There is no equality between items
@@ -229,7 +229,7 @@ class TestFabioH5(unittest.TestCase):
if fabio_image is None:
fabio_image = fabio.edfimage.EdfImage(data=data, header=header)
else:
- fabio_image.appendFrame(data=data, header=header)
+ fabio_image.append_frame(data=data, header=header)
h5_image = fabioh5.File(fabio_image=fabio_image)
data = h5_image["/scan_0/instrument/detector_0/others/time_of_day"]
# There is no equality between items
@@ -249,7 +249,7 @@ class TestFabioH5(unittest.TestCase):
if fabio_image is None:
fabio_image = fabio.edfimage.EdfImage(data=data, header=header)
else:
- fabio_image.appendFrame(data=data, header=header)
+ fabio_image.append_frame(data=data, header=header)
h5_image = fabioh5.File(fabio_image=fabio_image)
data = h5_image["/scan_0/instrument/detector_0/others/float_item"]
# At worst a float32
@@ -269,7 +269,7 @@ class TestFabioH5(unittest.TestCase):
if fabio_image is None:
fabio_image = fabio.edfimage.EdfImage(data=data, header=header)
else:
- fabio_image.appendFrame(data=data, header=header)
+ fabio_image.append_frame(data=data, header=header)
h5_image = fabioh5.File(fabio_image=fabio_image)
data = h5_image["/scan_0/instrument/detector_0/others/float_item"]
# At worst a float32
@@ -289,7 +289,7 @@ class TestFabioH5(unittest.TestCase):
if fabio_image is None:
fabio_image = fabio.edfimage.EdfImage(data=data, header=header)
else:
- fabio_image.appendFrame(data=data, header=header)
+ fabio_image.append_frame(data=data, header=header)
h5_image = fabioh5.File(fabio_image=fabio_image)
data = h5_image["/scan_0/instrument/detector_0/others/float_item"]
# At worst a float32
@@ -390,7 +390,7 @@ class TestFabioH5(unittest.TestCase):
fabio_image = fabio.edfimage.edfimage(data=data, header=header)
header = {}
header["foo"] = b'a\x90bc\xFE'
- fabio_image.appendFrame(data=data, header=header)
+ fabio_image.append_frame(data=data, header=header)
except Exception as e:
_logger.error(e.args[0])
_logger.debug("Backtrace", exc_info=True)
@@ -411,7 +411,7 @@ class TestFabioH5(unittest.TestCase):
fabio_image = fabio.edfimage.edfimage(data=data, header=header)
header = {}
header["foo"] = u'abc\u2764'
- fabio_image.appendFrame(data=data, header=header)
+ fabio_image.append_frame(data=data, header=header)
except Exception as e:
_logger.error(e.args[0])
_logger.debug("Backtrace", exc_info=True)
@@ -456,7 +456,7 @@ class TestFabioH5MultiFrames(unittest.TestCase):
if fabio_file is None:
fabio_file = fabio.edfimage.EdfImage(data=data, header=header)
else:
- fabio_file.appendFrame(data=data, header=header)
+ fabio_file.append_frame(data=data, header=header)
cls.fabio_file = fabio_file
cls.fabioh5 = fabioh5.File(fabio_image=fabio_file)
diff --git a/silx/io/test/test_nxdata.py b/silx/io/test/test_nxdata.py
index a790e36..80cc193 100644
--- a/silx/io/test/test_nxdata.py
+++ b/silx/io/test/test_nxdata.py
@@ -1,6 +1,6 @@
# coding: utf-8
# /*##########################################################################
-# Copyright (C) 2016-2018 European Synchrotron Radiation Facility
+# Copyright (C) 2016-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -25,7 +25,7 @@
__authors__ = ["P. Knobel"]
__license__ = "MIT"
-__date__ = "27/01/2018"
+__date__ = "24/03/2020"
import tempfile
@@ -54,6 +54,7 @@ class TestNXdata(unittest.TestCase):
g0d0.attrs["NX_class"] = "NXdata"
g0d0.attrs["signal"] = "scalar"
g0d0.create_dataset("scalar", data=10)
+ g0d0.create_dataset("scalar_errors", data=0.1)
g0d1 = g0d.create_group("2D_scalars")
g0d1.attrs["NX_class"] = "NXdata"
@@ -199,7 +200,7 @@ class TestNXdata(unittest.TestCase):
self.assertEqual(nxd.axes_names, [])
self.assertEqual(nxd.axes_dataset_names, [])
self.assertEqual(nxd.axes, [])
- self.assertIsNone(nxd.errors)
+ self.assertIsNotNone(nxd.errors)
self.assertFalse(nxd.is_scatter or nxd.is_x_y_value_scatter)
self.assertIsNone(nxd.interpretation)
diff --git a/silx/io/test/test_spectoh5.py b/silx/io/test/test_spectoh5.py
index 44b59e0..c3f03e9 100644
--- a/silx/io/test/test_spectoh5.py
+++ b/silx/io/test/test_spectoh5.py
@@ -39,7 +39,7 @@ __license__ = "MIT"
__date__ = "12/02/2018"
-sftext = """#F /tmp/sf.dat
+sfdata = b"""#F /tmp/sf.dat
#E 1455180875
#D Thu Feb 11 09:54:35 2016
#C imaging User = opid17
@@ -86,14 +86,11 @@ sftext = """#F /tmp/sf.dat
class TestConvertSpecHDF5(unittest.TestCase):
@classmethod
def setUpClass(cls):
- fd, cls.spec_fname = tempfile.mkstemp(text=False)
- if sys.version_info < (3, ):
- os.write(fd, sftext)
- else:
- os.write(fd, bytes(sftext, 'ascii'))
+ fd, cls.spec_fname = tempfile.mkstemp(prefix="TestConvertSpecHDF5")
+ os.write(fd, sfdata)
os.close(fd)
- fd, cls.h5_fname = tempfile.mkstemp(text=False)
+ fd, cls.h5_fname = tempfile.mkstemp(prefix="TestConvertSpecHDF5")
# Close and delete (we just need the name)
os.close(fd)
os.unlink(cls.h5_fname)
diff --git a/silx/io/test/test_url.py b/silx/io/test/test_url.py
index 5093fc2..e68c67a 100644
--- a/silx/io/test/test_url.py
+++ b/silx/io/test/test_url.py
@@ -197,6 +197,15 @@ class TestDataUrl(unittest.TestCase):
url = DataUrl(scheme="silx", file_path="/foo.h5", data_slice=(5, 1))
self.assertFalse(url.is_valid())
+ def test_path_creation(self):
+ """make sure the construction of path succeed and that we can
+ recreate a DataUrl from a path"""
+ for data_slice in (1, (1,)):
+ with self.subTest(data_slice=data_slice):
+ url = DataUrl(scheme="silx", file_path="/foo.h5", data_slice=data_slice)
+ path = url.path()
+ DataUrl(path=path)
+
def suite():
test_suite = unittest.TestSuite()
diff --git a/silx/io/test/test_utils.py b/silx/io/test/test_utils.py
index 56f89fc..6c70636 100644
--- a/silx/io/test/test_utils.py
+++ b/silx/io/test/test_utils.py
@@ -491,7 +491,7 @@ class TestGetData(unittest.TestCase):
data = numpy.array([[10, 50], [50, 10]])
fabiofile = fabio.edfimage.EdfImage(data, header)
fabiofile.write(cls.edf_filename)
- fabiofile.appendFrame(data=data, header=header)
+ fabiofile.append_frame(data=data, header=header)
fabiofile.write(cls.edf_multiframe_filename)
cls.txt_filename = os.path.join(directory, "test.txt")
diff --git a/silx/io/url.py b/silx/io/url.py
index c8cdc84..7607ae5 100644
--- a/silx/io/url.py
+++ b/silx/io/url.py
@@ -30,6 +30,7 @@ __date__ = "29/01/2018"
import logging
import six
+from collections.abc import Iterable
parse = six.moves.urllib.parse
@@ -211,7 +212,7 @@ class DataUrl(object):
pos = self.__path.index(url.path)
file_path = self.__path[0:pos] + url.path
else:
- scheme = url.scheme if url.scheme is not "" else None
+ scheme = url.scheme if url.scheme != "" else None
file_path = url.path
# Check absolute windows path
@@ -297,7 +298,10 @@ class DataUrl(object):
if self.__data_path is not None:
queries.append("path=" + self.__data_path)
if self.__data_slice is not None:
- data_slice = ",".join([slice_to_string(s) for s in self.__data_slice])
+ if isinstance(self.__data_slice, Iterable):
+ data_slice = ",".join([slice_to_string(s) for s in self.__data_slice])
+ else:
+ data_slice = slice_to_string(self.__data_slice)
queries.append("slice=" + data_slice)
query = "&".join(queries)
diff --git a/silx/io/utils.py b/silx/io/utils.py
index f294101..5da344d 100644
--- a/silx/io/utils.py
+++ b/silx/io/utils.py
@@ -1,6 +1,6 @@
# coding: utf-8
# /*##########################################################################
-# Copyright (C) 2016-2019 European Synchrotron Radiation Facility
+# Copyright (C) 2016-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -82,7 +82,7 @@ def supported_extensions(flat_formats=True):
:param bool flat_formats: If true, also include flat formats like npy or
edf (while the expected module is available)
:returns: A dictionary indexed by file description and containing a set of
- extensions (an extension is a string like "\*.ext").
+ extensions (an extension is a string like "\\*.ext").
:rtype: Dict[str, Set[str]]
"""
formats = collections.OrderedDict()
@@ -849,7 +849,7 @@ def rawfile_to_h5_external_dataset(bin_file, output_url, shape, dtype,
raise Exception('h5py >= 2.9 should be installed to access the '
'external feature.')
- with h5py.File(output_url.file_path()) as _h5_file:
+ with h5py.File(output_url.file_path(), mode="a") as _h5_file:
if output_url.data_path() in _h5_file:
if overwrite is False:
raise ValueError('data_path already exists')