author    Picca Frédéric-Emmanuel <picca@synchrotron-soleil.fr>    2017-08-18 14:48:52 +0200
committer Picca Frédéric-Emmanuel <picca@synchrotron-soleil.fr>    2017-08-18 14:48:52 +0200
commit    f7bdc2acff3c13a6d632c28c4569690ab106eed7 (patch)
tree      9d67cdb7152ee4e711379e03fe0546c7c3b97303 /silx/third_party
Import Upstream version 0.5.0+dfsg
Diffstat (limited to 'silx/third_party')
-rw-r--r--  silx/third_party/EdfFile.py           1223
-rw-r--r--  silx/third_party/TiffIO.py            1268
-rw-r--r--  silx/third_party/__init__.py            28
-rw-r--r--  silx/third_party/_local/__init__.py     36
-rw-r--r--  silx/third_party/_local/six.py         868
-rw-r--r--  silx/third_party/setup.py               48
-rw-r--r--  silx/third_party/six.py                 49
7 files changed, 3520 insertions, 0 deletions
diff --git a/silx/third_party/EdfFile.py b/silx/third_party/EdfFile.py
new file mode 100644
index 0000000..8a08c20
--- /dev/null
+++ b/silx/third_party/EdfFile.py
@@ -0,0 +1,1223 @@
+# /*##########################################################################
+#
+# Copyright (c) 2004-2017 European Synchrotron Radiation Facility
+#
+# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
+# the ESRF by the Software group.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# ############################################################################*/
+__author__ = "Alexandre Gobbo, V.A. Sole - ESRF Data Analysis"
+__contact__ = "sole@esrf.fr"
+__license__ = "MIT"
+__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
+"""
+ EdfFile.py
+    Generic class for manipulating EDF files.
+
+ Interface:
+ ===========================
+ class EdfFile:
+ __init__(self,FileName)
+ GetNumImages(self)
+        GetData(self,Index,DataType="",Pos=None,Size=None)
+ GetPixel(self,Index,Position)
+ GetHeader(self,Index)
+ GetStaticHeader(self,Index)
+        WriteImage(self,Header,Data,Append=1,DataType="",ByteOrder="")
+
+
+ Edf format assumptions:
+ ===========================
+ The following details were assumed for this implementation:
+ - Each Edf file contains a certain number of data blocks.
+    - Each data block represents data stored in a one-, two- or three-dimensional array.
+ - Each data block contains a header section, written in ASCII, and a data section of
+ binary information.
+    - The size of the header section in bytes is a multiple of 1024. The header is
+      padded with spaces (0x20). If the header is not padded to a multiple of 1024,
+      the file is still recognized, but output is always written in this format.
+    - The header section starts with '{' and finishes with '}'. It is composed of
+      several pairs 'keyword = value;'. The keywords are case insensitive, but the
+      values are case sensitive. Each pair is written on its own line (lines are
+      separated by 0x0A). Within a line, a semicolon (;) separates the pair from a
+      comment, which is not interpreted.
+      Example:
+        {
+        ; Example Header
+ HeaderID = EH:000001:000000:000000 ; automatically generated
+ ByteOrder = LowByteFirst ;
+ DataType = FloatValue ; 4 bytes per pixel
+ Size = 4000000 ; size of data section
+        Dim_1 = 1000 ; x coordinates
+ Dim_2 = 1000 ; y coordinates
+
+ (padded with spaces to complete 1024 bytes)
+ }
+    - Some header fields are required by this implementation. If any of them is
+      missing or inconsistent, an error is raised:
+        Size: size of the data block in bytes
+        Dim_1: size of the x coordinates (Dim_2 for 2-dimensional images, and also
+               Dim_3 for 3D)
+        DataType
+        ByteOrder
+    - For written images, these fields are automatically generated:
+        Size, Dim_1 (Dim_2 and Dim_3, if necessary), ByteOrder, DataType, HeaderID
+        and Image
+      These fields are called here the "static header" and can be retrieved with the
+      GetStaticHeader method. The other header components are obtained with GetHeader.
+      Both methods return a dictionary in which the key is the keyword of the pair.
+      When writing an image through the WriteImage method, the Header parameter should
+      not contain the static header information, which is generated automatically.
+    - The indexing of images through these functions is based only on the 0-based
+      position in the file; the header items HeaderID and Image are not considered
+      when referencing images.
+    - The data section contains a number of bytes equal to the value of the Size
+      keyword. The data section is translated into a 1D, 2D or 3D numpy array and
+      accessed through the GetData method.
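+
+    Example (illustrative sketch; "image.edf" is a hypothetical file name):
+    ===========================
+        import numpy
+        edf = EdfFile("image.edf", access="ab+")
+        edf.WriteImage({"Title": "test"}, numpy.arange(12).reshape(3, 4))
+        del edf                       # closes the file
+        edf = EdfFile("image.edf", access="rb")
+        data = edf.GetData(0)         # numpy array of shape (3, 4)
+        header = edf.GetHeader(0)     # {'Title': 'test'}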
+"""
+DEBUG = 0
+################################################################################
+import sys
+import numpy
+import os.path
+try:
+    import gzip
+    GZIP = True
+except ImportError:
+    GZIP = False
+try:
+    import bz2
+    BZ2 = True
+except ImportError:
+    BZ2 = False
+
+MARCCD_SUPPORT = False
+PILATUS_CBF_SUPPORT = False
+CAN_USE_FASTEDF = False
+
+# Using local TiffIO
+from . import TiffIO
+TIFF_SUPPORT = True
+
+# Constants
+
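+# EDF header sections are padded with spaces to a multiple of this block size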
+HEADER_BLOCK_SIZE = 1024
+STATIC_HEADER_ELEMENTS = (
+ "HeaderID",
+ "Image",
+ "ByteOrder",
+ "DataType",
+ "Dim_1",
+ "Dim_2",
+ "Dim_3",
+ "Offset_1",
+ "Offset_2",
+ "Offset_3",
+ "Size")
+
+STATIC_HEADER_ELEMENTS_CAPS = (
+ "HEADERID",
+ "IMAGE",
+ "BYTEORDER",
+ "DATATYPE",
+ "DIM_1",
+ "DIM_2",
+ "DIM_3",
+ "OFFSET_1",
+ "OFFSET_2",
+ "OFFSET_3",
+ "SIZE")
+
+LOWER_CASE = 0
+UPPER_CASE = 1
+
+KEYS = 1
+VALUES = 2
+
+
+class Image(object):
+ """
+ """
+ def __init__(self):
+ """ Constructor
+ """
+ self.Header = {}
+ self.StaticHeader = {}
+ self.HeaderPosition = 0
+ self.DataPosition = 0
+ self.Size = 0
+ self.NumDim = 1
+ self.Dim1 = 0
+ self.Dim2 = 0
+ self.Dim3 = 0
+ self.DataType = ""
+
+
+class EdfFile(object):
+ """
+ """
+ def __init__(self, FileName, access=None, fastedf=None):
+ """ Constructor
+
+ :param FileName: Name of the file (either existing or to be created)
+ :type FileName: string
+ :param access: access mode "r" for reading (the file should exist) or
+ "w" for writing (if the file does not exist, it does not matter).
+ :type access: string
+        :param fastedf: True to use the fastedf module
+        :type fastedf: bool
+ """
+ self.Images = []
+ self.NumImages = 0
+ self.FileName = FileName
+ self.File = 0
+ if fastedf is None:
+ fastedf = 0
+ self.fastedf = fastedf
+ self.ADSC = False
+ self.MARCCD = False
+ self.TIFF = False
+ self.PILATUS_CBF = False
+ self.SPE = False
+ if sys.byteorder == "big":
+ self.SysByteOrder = "HighByteFirst"
+ else:
+ self.SysByteOrder = "LowByteFirst"
+
+ if hasattr(FileName, "seek") and\
+ hasattr(FileName, "read"):
+ # this looks like a file descriptor ...
+ self.__ownedOpen = False
+ self.File = FileName
+ try:
+ self.FileName = self.File.name
+ except AttributeError:
+ self.FileName = self.File.filename
+ elif FileName.lower().endswith('.gz'):
+ if GZIP:
+ self.__ownedOpen = False
+ self.File = gzip.GzipFile(FileName)
+ else:
+ raise IOError("No gzip module support in this system")
+ elif FileName.lower().endswith('.bz2'):
+ if BZ2:
+ self.__ownedOpen = False
+ self.File = bz2.BZ2File(FileName)
+ else:
+ raise IOError("No bz2 module support in this system")
+ else:
+ self.__ownedOpen = True
+
+ if self.File in [0, None]:
+ if access is not None:
+ if access[0].upper() == "R":
+ if not os.path.isfile(self.FileName):
+ raise IOError("File %s not found" % FileName)
+ if 'b' not in access:
+ access += 'b'
+            if not os.path.isfile(self.FileName):
+                # write access
+                if access is None:
+                    # allow writing and reading
+                    access = "ab+"
+                    self.File = open(self.FileName, access)
+                    self.File.seek(0, 0)
+                    return
+                if 'b' not in access:
+                    access += 'b'
+                self.File = open(self.FileName, access)
+                return
+            else:
+                if access is None:
+                    if os.access(self.FileName, os.W_OK):
+                        access = "r+b"
+                    else:
+                        access = "rb"
+                self.File = open(self.FileName, access)
+                self.File.seek(0, 0)
+ twoChars = self.File.read(2)
+ tiff = False
+ if sys.version < '3.0':
+ if twoChars in ["II", "MM"]:
+ tiff = True
+ elif twoChars in [eval('b"II"'), eval('b"MM"')]:
+ tiff = True
+ if tiff:
+ fileExtension = os.path.splitext(self.FileName)[-1]
+ if fileExtension.lower() in [".tif", ".tiff"] or\
+ sys.version > '2.9':
+ if not TIFF_SUPPORT:
+ raise IOError("TIFF support not implemented")
+ else:
+ self.TIFF = True
+ elif not MARCCD_SUPPORT:
+ if not TIFF_SUPPORT:
+ raise IOError("MarCCD support not implemented")
+ else:
+ self.TIFF = True
+ else:
+ self.MARCCD = True
+ basename = os.path.basename(FileName).upper()
+ if basename.endswith('.CBF'):
+ if not PILATUS_CBF_SUPPORT:
+ raise IOError("CBF support not implemented")
+ if twoChars[0] != "{":
+ self.PILATUS_CBF = True
+ elif basename.endswith('.SPE'):
+ if twoChars[0] != "$":
+ self.SPE = True
+ elif basename.endswith('EDF.GZ') or basename.endswith('CCD.GZ'):
+ self.GZIP = True
+ else:
+ try:
+ self.File.close()
+ except:
+ pass
+ raise IOError("EdfFile: Error opening file")
+
+ self.File.seek(0, 0)
+ if self.TIFF:
+ self._wrapTIFF()
+ self.File.close()
+ return
+ if self.MARCCD:
+ self._wrapMarCCD()
+ self.File.close()
+ return
+ if self.PILATUS_CBF:
+ self._wrapPilatusCBF()
+ self.File.close()
+ return
+ if self.SPE:
+ self._wrapSPE()
+ self.File.close()
+ return
+
+ Index = 0
+ line = self.File.readline()
+ selectedLines = [""]
+ if sys.version > '2.6':
+ selectedLines.append(eval('b""'))
+ parsingHeader = False
+ while line not in selectedLines:
+ # decode to make sure I have character string
+ # str to make sure python 2.x sees it as string and not unicode
+ if sys.version < '3.0':
+ if type(line) != type(str("")):
+ line = "%s" % line
+ else:
+ try:
+ line = str(line.decode())
+ except UnicodeDecodeError:
+ try:
+ line = str(line.decode('utf-8'))
+ except UnicodeDecodeError:
+ try:
+ line = str(line.decode('latin-1'))
+ except UnicodeDecodeError:
+ line = "%s" % line
+ if (line.count("{\n") >= 1) or (line.count("{\r\n") >= 1):
+ parsingHeader = True
+ Index = self.NumImages
+ self.NumImages = self.NumImages + 1
+ self.Images.append(Image())
+
+ if line.count("=") >= 1:
+ listItems = line.split("=", 1)
+ typeItem = listItems[0].strip()
+ listItems = listItems[1].split(";", 1)
+ valueItem = listItems[0].strip()
+ if (typeItem == "HEADER_BYTES") and (Index == 0):
+ self.ADSC = True
+ break
+
+ # if typeItem in self.Images[Index].StaticHeader.keys():
+ if typeItem.upper() in STATIC_HEADER_ELEMENTS_CAPS:
+ self.Images[Index].StaticHeader[typeItem] = valueItem
+ else:
+ self.Images[Index].Header[typeItem] = valueItem
+ if ((line.count("}\n") >= 1) or (line.count("}\r") >= 1)) and (parsingHeader):
+ parsingHeader = False
+ # for i in STATIC_HEADER_ELEMENTS_CAPS:
+ # if self.Images[Index].StaticHeader[i]=="":
+ # raise "Bad File Format"
+ self.Images[Index].DataPosition = self.File.tell()
+ # self.File.seek(int(self.Images[Index].StaticHeader["Size"]), 1)
+ StaticPar = SetDictCase(self.Images[Index].StaticHeader, UPPER_CASE, KEYS)
+ if "SIZE" in StaticPar.keys():
+ self.Images[Index].Size = int(StaticPar["SIZE"])
+ if self.Images[Index].Size <= 0:
+ self.NumImages = Index
+ line = self.File.readline()
+ continue
+ else:
+ raise TypeError("EdfFile: Image doesn't have size information")
+ if "DIM_1" in StaticPar.keys():
+ self.Images[Index].Dim1 = int(StaticPar["DIM_1"])
+                    self.Images[Index].Offset1 = int(StaticPar.get("OFFSET_1", "0"))
+ else:
+ raise TypeError("EdfFile: Image doesn't have dimension information")
+ if "DIM_2" in StaticPar.keys():
+ self.Images[Index].NumDim = 2
+ self.Images[Index].Dim2 = int(StaticPar["DIM_2"])
+                    self.Images[Index].Offset2 = int(StaticPar.get("OFFSET_2", "0"))
+ if "DIM_3" in StaticPar.keys():
+ self.Images[Index].NumDim = 3
+ self.Images[Index].Dim3 = int(StaticPar["DIM_3"])
+                    self.Images[Index].Offset3 = int(StaticPar.get("OFFSET_3", "0"))
+ if "DATATYPE" in StaticPar.keys():
+ self.Images[Index].DataType = StaticPar["DATATYPE"]
+ else:
+ raise TypeError("EdfFile: Image doesn't have datatype information")
+ if "BYTEORDER" in StaticPar.keys():
+ self.Images[Index].ByteOrder = StaticPar["BYTEORDER"]
+ else:
+ raise TypeError("EdfFile: Image doesn't have byteorder information")
+
+ self.File.seek(self.Images[Index].Size, 1)
+
+ line = self.File.readline()
+
+ if self.ADSC:
+ self.File.seek(0, 0)
+ self.NumImages = 1
+ # this is a bad implementation of fabio adscimage
+ # please take a look at the fabio module of fable at sourceforge
+ infile = self.File
+ header_keys = []
+ header = {}
+ try:
+ """ read an adsc header """
+ line = infile.readline()
+ bytesread = len(line)
+ while '}' not in line:
+ if '=' in line:
+ (key, val) = line.split('=')
+ header_keys.append(key.strip())
+ header[key.strip()] = val.strip(' ;\n')
+ line = infile.readline()
+ bytesread = bytesread + len(line)
+            except:
+                raise Exception("Error processing adsc header")
+            # seek may not be supported by bzip/gzip file objects
+            try:
+                infile.seek(int(header['HEADER_BYTES']), 0)
+            except TypeError:
+                # gzipped files may not allow a seek, and reading the header
+                # line by line does not stop at a predictable byte position,
+                # so reopen the file and skip the header by reading it
+                infile.close()
+                infile = open(self.FileName, "rb")
+                infile.read(int(header['HEADER_BYTES']))
+ binary = infile.read()
+ infile.close()
+
+ # now read the data into the array
+ self.Images[Index].Dim1 = int(header['SIZE1'])
+ self.Images[Index].Dim2 = int(header['SIZE2'])
+ self.Images[Index].NumDim = 2
+ self.Images[Index].DataType = 'UnsignedShort'
+ try:
+ self.__data = numpy.reshape(
+ numpy.fromstring(binary, numpy.uint16),
+ (self.Images[Index].Dim2, self.Images[Index].Dim1))
+ except ValueError:
+ msg = 'Size spec in ADSC-header does not match size of image data field'
+ raise IOError(msg)
+ if 'little' in header['BYTE_ORDER']:
+ self.Images[Index].ByteOrder = 'LowByteFirst'
+ else:
+ self.Images[Index].ByteOrder = 'HighByteFirst'
+ if self.SysByteOrder.upper() != self.Images[Index].ByteOrder.upper():
+ self.__data = self.__data.byteswap()
+ self.Images[Index].ByteOrder = self.SysByteOrder
+
+ self.Images[Index].StaticHeader['Dim_1'] = self.Images[Index].Dim1
+ self.Images[Index].StaticHeader['Dim_2'] = self.Images[Index].Dim2
+ self.Images[Index].StaticHeader['Offset_1'] = 0
+ self.Images[Index].StaticHeader['Offset_2'] = 0
+ self.Images[Index].StaticHeader['DataType'] = self.Images[Index].DataType
+
+ self.__makeSureFileIsClosed()
+
+ def _wrapTIFF(self):
+ self._wrappedInstance = TiffIO.TiffIO(self.File, cache_length=0, mono_output=True)
+ self.NumImages = self._wrappedInstance.getNumberOfImages()
+ if self.NumImages < 1:
+ return
+
+ # wrapped image objects have to provide getInfo and getData
+ # info = self._wrappedInstance.getInfo( index)
+ # data = self._wrappedInstance.getData( index)
+ # for the time being I am going to assume all the images
+        # in the file have the same data type
+ data = None
+
+ for Index in range(self.NumImages):
+ info = self._wrappedInstance.getInfo(Index)
+ self.Images.append(Image())
+ self.Images[Index].Dim1 = info['nRows']
+ self.Images[Index].Dim2 = info['nColumns']
+ self.Images[Index].NumDim = 2
+ if data is None:
+ data = self._wrappedInstance.getData(0)
+ self.Images[Index].DataType = self.__GetDefaultEdfType__(data.dtype)
+ self.Images[Index].StaticHeader['Dim_1'] = self.Images[Index].Dim1
+ self.Images[Index].StaticHeader['Dim_2'] = self.Images[Index].Dim2
+ self.Images[Index].StaticHeader['Offset_1'] = 0
+ self.Images[Index].StaticHeader['Offset_2'] = 0
+ self.Images[Index].StaticHeader['DataType'] = self.Images[Index].DataType
+ self.Images[Index].Header.update(info)
+
+ def _wrapMarCCD(self):
+ raise NotImplementedError("Look at the module EdfFile from PyMca")
+
+ def _wrapPilatusCBF(self):
+ raise NotImplementedError("Look at the module EdfFile from PyMca")
+
+ def _wrapSPE(self):
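+        # Assumed Princeton Instruments SPE v2 layout: 16-bit xdim at byte
+        # offset 42, 16-bit ydim at offset 656, unsigned 16-bit pixel data
+        # starting at offset 4100. The numpy.fromfile branch below is
+        # intentionally disabled ("if 0"); the struct-based branch also
+        # works with file-like objects.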
+ if 0 and sys.version < '3.0':
+ self.File.seek(42)
+ xdim = numpy.int64(numpy.fromfile(self.File, numpy.int16, 1)[0])
+ self.File.seek(656)
+ ydim = numpy.int64(numpy.fromfile(self.File, numpy.int16, 1))
+ self.File.seek(4100)
+ self.__data = numpy.fromfile(self.File, numpy.uint16, int(xdim * ydim))
+ else:
+ import struct
+ self.File.seek(0)
+ a = self.File.read()
+ xdim = numpy.int64(struct.unpack('<h', a[42:44])[0])
+ ydim = numpy.int64(struct.unpack('<h', a[656:658])[0])
+ fmt = '<%dH' % int(xdim * ydim)
+ self.__data = numpy.array(struct.unpack(fmt, a[4100:int(4100 + int(2 * xdim * ydim))])).astype(numpy.uint16)
+ self.__data.shape = ydim, xdim
+ Index = 0
+ self.Images.append(Image())
+ self.NumImages = 1
+ self.Images[Index].Dim1 = ydim
+ self.Images[Index].Dim2 = xdim
+ self.Images[Index].NumDim = 2
+ self.Images[Index].DataType = 'UnsignedShort'
+ self.Images[Index].ByteOrder = 'LowByteFirst'
+ if self.SysByteOrder.upper() != self.Images[Index].ByteOrder.upper():
+ self.__data = self.__data.byteswap()
+ self.Images[Index].StaticHeader['Dim_1'] = self.Images[Index].Dim1
+ self.Images[Index].StaticHeader['Dim_2'] = self.Images[Index].Dim2
+ self.Images[Index].StaticHeader['Offset_1'] = 0
+ self.Images[Index].StaticHeader['Offset_2'] = 0
+ self.Images[Index].StaticHeader['DataType'] = self.Images[Index].DataType
+
+ def GetNumImages(self):
+ """ Returns number of images of the object (and associated file)
+ """
+ return self.NumImages
+
+ def GetData(self, *var, **kw):
+ try:
+ self.__makeSureFileIsOpen()
+ return self._GetData(*var, **kw)
+ finally:
+ self.__makeSureFileIsClosed()
+
+ def _GetData(self, Index, DataType="", Pos=None, Size=None):
+ """ Returns numpy array with image data
+ Index: The zero-based index of the image in the file
+            DataType: The edf type of the array to be returned.
+                      If omitted, the default type for the data type
+                      indicated in the image header is used.
+ Default relation between Edf types and NumPy's typecodes:
+ SignedByte int8 b
+ UnsignedByte uint8 B
+ SignedShort int16 h
+ UnsignedShort uint16 H
+ SignedInteger int32 i
+ UnsignedInteger uint32 I
+ SignedLong int32 i
+ UnsignedLong uint32 I
+ Signed64 int64 (l in 64bit, q in 32 bit)
+ Unsigned64 uint64 (L in 64bit, Q in 32 bit)
+ FloatValue float32 f
+ DoubleValue float64 d
+            Pos: Tuple (x) or (x,y) or (x,y,z) that indicates the beginning
+                 of the data to be read. If omitted, it is set to the origin
+                 (0), (0,0) or (0,0,0)
+            Size: Tuple, size of the data to be returned as (x) or (x,y) or
+                  (x,y,z). If omitted, it is the distance from Pos to the end.
+
+            If neither Pos nor Size is given, the whole data is returned.
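+
+            Example (illustrative sketch):
+                data = edf.GetData(0)   # whole first image
+                roi = edf.GetData(0, Pos=(10, 20), Size=(100, 200))
+                # roi has shape (200, 100): 100 columns by 200 rows,
+                # starting at x=10, y=20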
+ """
+ fastedf = self.fastedf
+ if Index < 0 or Index >= self.NumImages:
+ raise ValueError("EdfFile: Index out of limit")
+ if fastedf is None:
+ fastedf = 0
+ if Pos is None and Size is None:
+ if self.ADSC or self.MARCCD or self.PILATUS_CBF or self.SPE:
+ return self.__data
+ elif self.TIFF:
+ data = self._wrappedInstance.getData(Index)
+ return data
+ else:
+ self.File.seek(self.Images[Index].DataPosition, 0)
+ datatype = self.__GetDefaultNumpyType__(self.Images[Index].DataType, index=Index)
+ try:
+ datasize = self.__GetSizeNumpyType__(datatype)
+ except TypeError:
+ print("What is the meaning of this error?")
+ datasize = 8
+ if self.Images[Index].NumDim == 3:
+ image = self.Images[Index]
+ sizeToRead = image.Dim1 * image.Dim2 * image.Dim3 * datasize
+ Data = numpy.fromstring(self.File.read(sizeToRead), datatype)
+ Data = numpy.reshape(Data, (self.Images[Index].Dim3, self.Images[Index].Dim2, self.Images[Index].Dim1))
+ elif self.Images[Index].NumDim == 2:
+ image = self.Images[Index]
+ sizeToRead = image.Dim1 * image.Dim2 * datasize
+ Data = numpy.fromstring(self.File.read(sizeToRead), datatype)
+ # print "datatype = ",datatype
+ # print "Data.type = ", Data.dtype.char
+ # print "self.Images[Index].DataType ", self.Images[Index].DataType
+ # print "Data.shape",Data.shape
+ # print "datasize = ",datasize
+ # print "sizeToRead ",sizeToRead
+ # print "lenData = ", len(Data)
+ Data = numpy.reshape(Data, (self.Images[Index].Dim2, self.Images[Index].Dim1))
+ elif self.Images[Index].NumDim == 1:
+ sizeToRead = self.Images[Index].Dim1 * datasize
+ Data = numpy.fromstring(self.File.read(sizeToRead), datatype)
+ elif self.ADSC or self.MARCCD or self.PILATUS_CBF or self.SPE:
+ return self.__data[Pos[1]:(Pos[1] + Size[1]),
+ Pos[0]:(Pos[0] + Size[0])]
+ elif self.TIFF:
+ data = self._wrappedInstance.getData(Index)
+ return data[Pos[1]:(Pos[1] + Size[1]), Pos[0]:(Pos[0] + Size[0])]
+ elif fastedf and CAN_USE_FASTEDF:
+ raise NotImplementedError("Look at the module EdfFile from PyMCA")
+ else:
+ if fastedf:
+ print("It could not use fast routines")
+ type_ = self.__GetDefaultNumpyType__(self.Images[Index].DataType, index=Index)
+ size_pixel = self.__GetSizeNumpyType__(type_)
+ Data = numpy.array([], type_)
+ if self.Images[Index].NumDim == 1:
+ if Pos is None:
+ Pos = (0,)
+ if Size is None:
+ Size = (0,)
+ sizex = self.Images[Index].Dim1
+ Size = list(Size)
+ if Size[0] == 0:
+ Size[0] = sizex - Pos[0]
+ self.File.seek((Pos[0] * size_pixel) + self.Images[Index].DataPosition, 0)
+ Data = numpy.fromstring(self.File.read(Size[0] * size_pixel), type_)
+ elif self.Images[Index].NumDim == 2:
+ if Pos is None:
+ Pos = (0, 0)
+ if Size is None:
+ Size = (0, 0)
+ Size = list(Size)
+ sizex, sizey = self.Images[Index].Dim1, self.Images[Index].Dim2
+ if Size[0] == 0:
+ Size[0] = sizex - Pos[0]
+ if Size[1] == 0:
+ Size[1] = sizey - Pos[1]
+ # print len(range(Pos[1],Pos[1]+Size[1])), "LECTURES OF ", Size[0], "POINTS"
+ # print "sizex = ", sizex, "sizey = ", sizey
+ Data = numpy.zeros((Size[1], Size[0]), type_)
+ dataindex = 0
+ for y in range(Pos[1], Pos[1] + Size[1]):
+ self.File.seek((((y * sizex) + Pos[0]) * size_pixel) + self.Images[Index].DataPosition, 0)
+ line = numpy.fromstring(self.File.read(Size[0] * size_pixel), type_)
+ Data[dataindex, :] = line[:]
+ # Data=numpy.concatenate((Data,line))
+ dataindex += 1
+ # print "DataSize = ",Data.shape
+ # print "Requested reshape = ",Size[1],'x',Size[0]
+ # Data = numpy.reshape(Data, (Size[1],Size[0]))
+ elif self.Images[Index].NumDim == 3:
+ if Pos is None:
+ Pos = (0, 0, 0)
+ if Size is None:
+ Size = (0, 0, 0)
+ Size = list(Size)
+ sizex, sizey, sizez = self.Images[Index].Dim1, self.Images[Index].Dim2, self.Images[Index].Dim3
+ if Size[0] == 0:
+ Size[0] = sizex - Pos[0]
+ if Size[1] == 0:
+ Size[1] = sizey - Pos[1]
+ if Size[2] == 0:
+ Size[2] = sizez - Pos[2]
+ for z in range(Pos[2], Pos[2] + Size[2]):
+ for y in range(Pos[1], Pos[1] + Size[1]):
+ self.File.seek(((((z * sizey + y) * sizex) + Pos[0]) * size_pixel) + self.Images[Index].DataPosition, 0)
+ line = numpy.fromstring(self.File.read(Size[0] * size_pixel), type_)
+ Data = numpy.concatenate((Data, line))
+ Data = numpy.reshape(Data, (Size[2], Size[1], Size[0]))
+
+ if self.SysByteOrder.upper() != self.Images[Index].ByteOrder.upper():
+ Data = Data.byteswap()
+ if DataType != "":
+ Data = self.__SetDataType__(Data, DataType)
+ return Data
+
+ def GetPixel(self, Index, Position):
+ """ Returns double value of the pixel, regardless the format of the array
+ Index: The zero-based index of the image in the file
+ Position: Tuple with the coordinete (x), (x,y) or (x,y,z)
+ """
+ if Index < 0 or Index >= self.NumImages:
+ raise ValueError("EdfFile: Index out of limit")
+ if len(Position) != self.Images[Index].NumDim:
+ raise ValueError("EdfFile: coordinate with wrong dimension ")
+
+        size_pixel = self.__GetSizeNumpyType__(self.__GetDefaultNumpyType__(self.Images[Index].DataType, index=Index))
+ offset = Position[0] * size_pixel
+ if self.Images[Index].NumDim > 1:
+ size_row = size_pixel * self.Images[Index].Dim1
+ offset = offset + (Position[1] * size_row)
+ if self.Images[Index].NumDim == 3:
+ size_img = size_row * self.Images[Index].Dim2
+ offset = offset + (Position[2] * size_img)
+ self.File.seek(self.Images[Index].DataPosition + offset, 0)
+ Data = numpy.fromstring(self.File.read(size_pixel), self.__GetDefaultNumpyType__(self.Images[Index].DataType, index=Index))
+ if self.SysByteOrder.upper() != self.Images[Index].ByteOrder.upper():
+ Data = Data.byteswap()
+ Data = self.__SetDataType__(Data, "DoubleValue")
+ return Data[0]
+
+ def GetHeader(self, Index):
+ """ Returns dictionary with image header fields.
+            Does not include the basic (static) fields defined by data shape,
+            type and file position. Those are obtained with the
+            GetStaticHeader method.
+ Index: The zero-based index of the image in the file
+ """
+ if Index < 0 or Index >= self.NumImages:
+ raise ValueError("Index out of limit")
+ # return self.Images[Index].Header
+ ret = {}
+ for i in self.Images[Index].Header.keys():
+ ret[i] = self.Images[Index].Header[i]
+ return ret
+
+ def GetStaticHeader(self, Index):
+ """ Returns dictionary with static parameters
+ Data format and file position dependent information
+ (dim1,dim2,size,datatype,byteorder,headerId,Image)
+ Index: The zero-based index of the image in the file
+ """
+ if Index < 0 or Index >= self.NumImages:
+ raise ValueError("Index out of limit")
+ # return self.Images[Index].StaticHeader
+ ret = {}
+ for i in self.Images[Index].StaticHeader.keys():
+ ret[i] = self.Images[Index].StaticHeader[i]
+ return ret
+
+ def WriteImage(self, *var, **kw):
+ try:
+ self.__makeSureFileIsOpen()
+ return self._WriteImage(*var, **kw)
+ finally:
+ self.__makeSureFileIsClosed()
+
+ def _WriteImage(self, Header, Data, Append=1, DataType="", ByteOrder=""):
+ """ Writes image to the file.
+            Header: Dictionary containing the non-static header
+                    information (static information is generated
+                    according to the position of the image and the data format)
+ Append: If equals to 0, overwrites the file. Otherwise, appends
+ to the end of the file
+ DataType: The data type to be saved to the file:
+ SignedByte
+ UnsignedByte
+ SignedShort
+ UnsignedShort
+ SignedInteger
+ UnsignedInteger
+ SignedLong
+ UnsignedLong
+ FloatValue
+ DoubleValue
+                      Default: according to Data array typecode:
+                               b: SignedByte
+                               B: UnsignedByte
+                               h: SignedShort
+                               H: UnsignedShort
+                               i: SignedInteger
+                               I: UnsignedInteger
+                               l: SignedLong (Signed64 on Linux)
+                               L: UnsignedLong (Unsigned64 on Linux)
+                               f: FloatValue
+                               d: DoubleValue
+ ByteOrder: Byte order of the data in file:
+ HighByteFirst
+ LowByteFirst
+ Default: system's byte order
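+
+            Example (illustrative sketch; data is a numpy array):
+                # overwrite the file with a single float32 image
+                edf.WriteImage({"Title": "flat"}, data, Append=0,
+                               DataType="FloatValue")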
+ """
+ if Append == 0:
+ self.File.truncate(0)
+ self.Images = []
+ self.NumImages = 0
+ Index = self.NumImages
+ self.NumImages = self.NumImages + 1
+ self.Images.append(Image())
+
+ # self.Images[Index].StaticHeader["Dim_1"] = "%d" % Data.shape[1]
+ # self.Images[Index].StaticHeader["Dim_2"] = "%d" % Data.shape[0]
+ scalarSize = self.__GetSizeNumpyType__(Data.dtype)
+ if len(Data.shape) == 1:
+ self.Images[Index].Dim1 = Data.shape[0]
+ self.Images[Index].StaticHeader["Dim_1"] = "%d" % self.Images[Index].Dim1
+ self.Images[Index].Size = Data.shape[0] * scalarSize
+ elif len(Data.shape) == 2:
+ self.Images[Index].Dim1 = Data.shape[1]
+ self.Images[Index].Dim2 = Data.shape[0]
+ self.Images[Index].StaticHeader["Dim_1"] = "%d" % self.Images[Index].Dim1
+ self.Images[Index].StaticHeader["Dim_2"] = "%d" % self.Images[Index].Dim2
+ self.Images[Index].Size = Data.shape[0] * Data.shape[1] * scalarSize
+ self.Images[Index].NumDim = 2
+ elif len(Data.shape) == 3:
+ self.Images[Index].Dim1 = Data.shape[2]
+ self.Images[Index].Dim2 = Data.shape[1]
+ self.Images[Index].Dim3 = Data.shape[0]
+ self.Images[Index].StaticHeader["Dim_1"] = "%d" % self.Images[Index].Dim1
+ self.Images[Index].StaticHeader["Dim_2"] = "%d" % self.Images[Index].Dim2
+ self.Images[Index].StaticHeader["Dim_3"] = "%d" % self.Images[Index].Dim3
+ self.Images[Index].Size = Data.shape[0] * Data.shape[1] * Data.shape[2] * scalarSize
+ self.Images[Index].NumDim = 3
+ elif len(Data.shape) > 3:
+            raise TypeError("EdfFile: Data dimension not supported")
+
+ if DataType == "":
+ self.Images[Index].DataType = self.__GetDefaultEdfType__(Data.dtype)
+ else:
+ self.Images[Index].DataType = DataType
+ Data = self.__SetDataType__(Data, DataType)
+
+ if ByteOrder == "":
+ self.Images[Index].ByteOrder = self.SysByteOrder
+ else:
+ self.Images[Index].ByteOrder = ByteOrder
+
+ self.Images[Index].StaticHeader["Size"] = "%d" % self.Images[Index].Size
+ self.Images[Index].StaticHeader["Image"] = Index + 1
+ self.Images[Index].StaticHeader["HeaderID"] = "EH:%06d:000000:000000" % self.Images[Index].StaticHeader["Image"]
+ self.Images[Index].StaticHeader["ByteOrder"] = self.Images[Index].ByteOrder
+ self.Images[Index].StaticHeader["DataType"] = self.Images[Index].DataType
+
+ self.Images[Index].Header = {}
+ self.File.seek(0, 2)
+ StrHeader = "{\n"
+ for i in STATIC_HEADER_ELEMENTS:
+ if i in self.Images[Index].StaticHeader.keys():
+ StrHeader = StrHeader + ("%s = %s ;\n" % (i, self.Images[Index].StaticHeader[i]))
+ for i in Header.keys():
+ StrHeader = StrHeader + ("%s = %s ;\n" % (i, Header[i]))
+ self.Images[Index].Header[i] = Header[i]
+        newsize = (((len(StrHeader) + 1) // HEADER_BLOCK_SIZE) + 1) * HEADER_BLOCK_SIZE - 2
+ StrHeader = StrHeader.ljust(newsize)
+ StrHeader = StrHeader + "}\n"
+
+ self.Images[Index].HeaderPosition = self.File.tell()
+ self.File.write(StrHeader.encode())
+ self.Images[Index].DataPosition = self.File.tell()
+
+ # if self.Images[Index].StaticHeader["ByteOrder"] != self.SysByteOrder:
+ if self.Images[Index].ByteOrder.upper() != self.SysByteOrder.upper():
+ self.File.write((Data.byteswap()).tostring())
+ else:
+ self.File.write(Data.tostring())
+
+ def __makeSureFileIsOpen(self):
+ if DEBUG:
+ print("Making sure file is open")
+ if not self.__ownedOpen:
+ return
+ if self.ADSC or self.MARCCD or self.PILATUS_CBF or self.SPE:
+ if DEBUG:
+ print("Special case. Image is buffered")
+ return
+ if self.File in [0, None]:
+ if DEBUG:
+ print("File is None")
+ elif self.File.closed:
+ if DEBUG:
+ print("Reopening closed file")
+ accessMode = self.File.mode
+ fileName = self.File.name
+ newFile = open(fileName, accessMode)
+ self.File = newFile
+ return
+
+ def __makeSureFileIsClosed(self):
+ if DEBUG:
+ print("Making sure file is closed")
+ if not self.__ownedOpen:
+ return
+ if self.ADSC or self.MARCCD or self.PILATUS_CBF or self.SPE:
+ if DEBUG:
+ print("Special case. Image is buffered")
+ return
+ if self.File in [0, None]:
+ if DEBUG:
+ print("File is None")
+ elif not self.File.closed:
+ if DEBUG:
+ print("Closing file")
+ self.File.close()
+ return
+
+ def __GetDefaultNumpyType__(self, EdfType, index=None):
+ """ Internal method: returns NumPy type according to Edf type
+ """
+ return self.GetDefaultNumpyType(EdfType, index)
+
+ def __GetDefaultEdfType__(self, NumpyType):
+ """ Internal method: returns Edf type according Numpy type
+ """
+ if NumpyType in ["b", numpy.int8]:
+ return "SignedByte"
+ elif NumpyType in ["B", numpy.uint8]:
+ return "UnsignedByte"
+ elif NumpyType in ["h", numpy.int16]:
+ return "SignedShort"
+ elif NumpyType in ["H", numpy.uint16]:
+ return "UnsignedShort"
+ elif NumpyType in ["i", numpy.int32]:
+ return "SignedInteger"
+ elif NumpyType in ["I", numpy.uint32]:
+ return "UnsignedInteger"
+ elif NumpyType == "l":
+ if sys.platform == 'linux2':
+ return "Signed64"
+ else:
+ return "SignedLong"
+ elif NumpyType == "L":
+ if sys.platform == 'linux2':
+ return "Unsigned64"
+ else:
+ return "UnsignedLong"
+ elif NumpyType == numpy.int64:
+ return "Signed64"
+ elif NumpyType == numpy.uint64:
+ return "Unsigned64"
+ elif NumpyType in ["f", numpy.float32]:
+ return "FloatValue"
+ elif NumpyType in ["d", numpy.float64]:
+ return "DoubleValue"
+ else:
+ raise TypeError("unknown NumpyType %s" % NumpyType)
+
+ def __GetSizeNumpyType__(self, NumpyType):
+ """ Internal method: returns size of NumPy's Array Types
+ """
+ if NumpyType in ["b", numpy.int8]:
+ return 1
+ elif NumpyType in ["B", numpy.uint8]:
+ return 1
+ elif NumpyType in ["h", numpy.int16]:
+ return 2
+ elif NumpyType in ["H", numpy.uint16]:
+ return 2
+ elif NumpyType in ["i", numpy.int32]:
+ return 4
+ elif NumpyType in ["I", numpy.uint32]:
+ return 4
+ elif NumpyType == "l":
+ if sys.platform == 'linux2':
+ return 8 # 64 bit
+ else:
+ return 4 # 32 bit
+ elif NumpyType == "L":
+ if sys.platform == 'linux2':
+ return 8 # 64 bit
+ else:
+ return 4 # 32 bit
+ elif NumpyType in ["f", numpy.float32]:
+ return 4
+ elif NumpyType in ["d", numpy.float64]:
+ return 8
+ elif NumpyType == "Q":
+ return 8 # unsigned 64 in 32 bit
+ elif NumpyType == "q":
+ return 8 # signed 64 in 32 bit
+ elif NumpyType == numpy.uint64:
+ return 8
+ elif NumpyType == numpy.int64:
+ return 8
+ else:
+ raise TypeError("unknown NumpyType %s" % NumpyType)
+
+ def __SetDataType__(self, Array, DataType):
+ """ Internal method: array type convertion
+ """
+        # use the dtype object rather than Array.dtype.char to avoid problems
+ FromEdfType = Array.dtype
+ ToEdfType = self.__GetDefaultNumpyType__(DataType)
+ if ToEdfType != FromEdfType:
+ aux = Array.astype(self.__GetDefaultNumpyType__(DataType))
+ return aux
+ return Array
+
+ def __del__(self):
+ try:
+ self.__makeSureFileIsClosed()
+ except:
+ pass
+
+ def GetDefaultNumpyType(self, EdfType, index=None):
+ """ Returns NumPy type according Edf type
+ """
+ if index is None:
+ return GetDefaultNumpyType(EdfType)
+ EdfType = EdfType.upper()
+ if EdfType in ['SIGNED64']:
+ return numpy.int64
+ if EdfType in ['UNSIGNED64']:
+ return numpy.uint64
+ if EdfType in ["SIGNEDLONG", "UNSIGNEDLONG"]:
+ dim1 = 1
+ dim2 = 1
+ dim3 = 1
+ if hasattr(self.Images[index], "Dim1"):
+ dim1 = self.Images[index].Dim1
+ if hasattr(self.Images[index], "Dim2"):
+ dim2 = self.Images[index].Dim2
+ if dim2 <= 0:
+ dim2 = 1
+ if hasattr(self.Images[index], "Dim3"):
+ dim3 = self.Images[index].Dim3
+ if dim3 <= 0:
+ dim3 = 1
+ if hasattr(self.Images[index], "Size"):
+ size = self.Images[index].Size
+ if size / (dim1 * dim2 * dim3) == 8:
+ if EdfType == "UNSIGNEDLONG":
+ return numpy.uint64
+ else:
+ return numpy.int64
+ if EdfType == "UNSIGNEDLONG":
+ return numpy.uint32
+ else:
+ return numpy.int32
+
+ return GetDefaultNumpyType(EdfType)
+
+
+def GetDefaultNumpyType(EdfType):
+ """ Returns NumPy type according Edf type
+ """
+ EdfType = EdfType.upper()
+ if EdfType == "SIGNEDBYTE":
+ return numpy.int8 # "b"
+ elif EdfType == "UNSIGNEDBYTE":
+ return numpy.uint8 # "B"
+ elif EdfType == "SIGNEDSHORT":
+ return numpy.int16 # "h"
+ elif EdfType == "UNSIGNEDSHORT":
+ return numpy.uint16 # "H"
+ elif EdfType == "SIGNEDINTEGER":
+ return numpy.int32 # "i"
+ elif EdfType == "UNSIGNEDINTEGER":
+ return numpy.uint32 # "I"
+ elif EdfType == "SIGNEDLONG":
+ return numpy.int32 # "i" #ESRF acquisition is made in 32bit
+ elif EdfType == "UNSIGNEDLONG":
+ return numpy.uint32 # "I" #ESRF acquisition is made in 32bit
+ elif EdfType == "SIGNED64":
+ return numpy.int64 # "l"
+ elif EdfType == "UNSIGNED64":
+ return numpy.uint64 # "L"
+ elif EdfType == "FLOATVALUE":
+ return numpy.float32 # "f"
+ elif EdfType == "FLOAT":
+ return numpy.float32 # "f"
+ elif EdfType == "DOUBLEVALUE":
+ return numpy.float64 # "d"
+ else:
+ raise TypeError("unknown EdfType %s" % EdfType)
+
+
+def SetDictCase(Dict, Case, Flag):
+ """ Returns dictionary with keys and/or values converted into upper or lowercase
+ Dict: input dictionary
+ Case: LOWER_CASE, UPPER_CASE
+ Flag: KEYS, VALUES or KEYS | VALUES
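+        Example (illustrative):
+            SetDictCase({"Key": "Val"}, UPPER_CASE, KEYS) -> {"KEY": "Val"}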
+ """
+ newdict = {}
+ for i in Dict.keys():
+ newkey = i
+ newvalue = Dict[i]
+ if Flag & KEYS:
+ if Case == LOWER_CASE:
+ newkey = newkey.lower()
+ else:
+ newkey = newkey.upper()
+ if Flag & VALUES:
+ if Case == LOWER_CASE:
+ newvalue = newvalue.lower()
+ else:
+ newvalue = newvalue.upper()
+ newdict[newkey] = newvalue
+ return newdict
+
+
+def GetRegion(Arr, Pos, Size):
+ """Returns array with refion of Arr.
+ Arr must be 1d, 2d or 3d
+ Pos and Size are tuples in the format (x) or (x,y) or (x,y,z)
+ Both parameters must have the same size as the dimention of Arr
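+    Example (illustrative): for a 2D array a,
+        GetRegion(a, (10, 20), (100, 50)) returns a[20:70, 10:110]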
+ """
+ Dim = len(Arr.shape)
+ if len(Pos) != Dim:
+ return None
+ if len(Size) != Dim:
+ return None
+
+ if (Dim == 1):
+ SizeX = Size[0]
+ if SizeX == 0:
+ SizeX = Arr.shape[0] - Pos[0]
+ ArrRet = numpy.take(Arr, range(Pos[0], Pos[0] + SizeX))
+ elif (Dim == 2):
+ SizeX = Size[0]
+ SizeY = Size[1]
+ if SizeX == 0:
+ SizeX = Arr.shape[1] - Pos[0]
+ if SizeY == 0:
+ SizeY = Arr.shape[0] - Pos[1]
+ ArrRet = numpy.take(Arr, range(Pos[1], Pos[1] + SizeY))
+ ArrRet = numpy.take(ArrRet, range(Pos[0], Pos[0] + SizeX), 1)
+ elif (Dim == 3):
+ SizeX = Size[0]
+ SizeY = Size[1]
+ SizeZ = Size[2]
+ if SizeX == 0:
+ SizeX = Arr.shape[2] - Pos[0]
+        if SizeY == 0:
+            SizeY = Arr.shape[1] - Pos[1]
+ if SizeZ == 0:
+ SizeZ = Arr.shape[0] - Pos[2]
+ ArrRet = numpy.take(Arr, range(Pos[2], Pos[2] + SizeZ))
+ ArrRet = numpy.take(ArrRet, range(Pos[1], Pos[1] + SizeY), 1)
+ ArrRet = numpy.take(ArrRet, range(Pos[0], Pos[0] + SizeX), 2)
+ else:
+ ArrRet = None
+ return ArrRet
+
+
+if __name__ == "__main__":
+ if 1:
+ a = numpy.zeros((5, 10))
+ for i in range(5):
+ for j in range(10):
+ a[i, j] = 10 * i + j
+ edf = EdfFile("armando.edf", access="ab+")
+ edf.WriteImage({}, a)
+ del edf # force to close the file
+ inp = EdfFile("armando.edf")
+ b = inp.GetData(0)
+ out = EdfFile("armando2.edf")
+ out.WriteImage({}, b)
+ del out # force to close the file
+ inp2 = EdfFile("armando2.edf")
+ c = inp2.GetData(0)
+ print("A SHAPE = ", a.shape)
+ print("B SHAPE = ", b.shape)
+ print("C SHAPE = ", c.shape)
+ for i in range(5):
+ print("A", a[i, :])
+ print("B", b[i, :])
+ print("C", c[i, :])
+
+ x = numpy.arange(100)
+ x.shape = 5, 20
+ for item in ["SignedByte", "UnsignedByte",
+ "SignedShort", "UnsignedShort",
+ "SignedLong", "UnsignedLong",
+ "Signed64", "Unsigned64",
+ "FloatValue", "DoubleValue"]:
+ fname = item + ".edf"
+ if os.path.exists(fname):
+ os.remove(fname)
+ towrite = EdfFile(fname)
+ towrite.WriteImage({}, x, DataType=item, Append=0)
+ sys.exit(0)
+
+ # Creates object based on file exe.edf
+ exe = EdfFile("images/test_image.edf")
+ x = EdfFile("images/test_getdata.edf")
+    # Gets a region of the image data
+ arr = exe.GetData(0, Pos=(100, 200), Size=(200, 400))
+ x.WriteImage({}, arr, 0)
+
+ arr = exe.GetData(0, Pos=(100, 200))
+ x.WriteImage({}, arr)
+
+ arr = exe.GetData(0, Size=(200, 400))
+ x.WriteImage({}, arr)
+
+ arr = exe.GetData(0)
+ x.WriteImage({}, arr)
+
+ sys.exit()
+
+ # Creates object based on file exe.edf
+ exe = EdfFile("images/.edf")
+
+    # Creates a long array, filled with 0xFFFFFFFF (-1)
+ la = numpy.zeros((100, 100))
+ la = la - 1
+
+    # Creates a short array, filled with 0xFFFF
+    sa = numpy.zeros((100, 100))
+    sa = sa + 0xFFFF
+    sa = sa.astype(numpy.int16)  # Numeric's "s" typecode was a 16-bit int
+
+ # Writes long array, initializing file (append=0)
+ exe.WriteImage({}, la, 0, "")
+
+ # Appends short array with new header items
+ exe.WriteImage({'Name': 'Alexandre', 'Date': '16/07/2001'}, sa)
+
+ # Appends short array, in Edf type unsigned
+ exe.WriteImage({}, sa, DataType="UnsignedShort")
+
+ # Appends short array, in Edf type unsigned
+ exe.WriteImage({}, sa, DataType="UnsignedLong")
+
+    # Appends long array as a double
+    exe.WriteImage({}, la, DataType="DoubleValue")
+
+    # Gets unsigned short data, storing it in a signed long
+ ushort = exe.GetData(2, "SignedLong")
+
+ # Makes an operation
+ ushort = ushort - 0x10
+
+ # Saves Result as signed long
+ exe.WriteImage({}, ushort)
+
+ # Saves in the original format (unsigned short)
+ OldHeader = exe.GetStaticHeader(2)
+ exe.WriteImage({}, ushort, 1, OldHeader["DataType"])
diff --git a/silx/third_party/TiffIO.py b/silx/third_party/TiffIO.py
new file mode 100644
index 0000000..156ae11
--- /dev/null
+++ b/silx/third_party/TiffIO.py
@@ -0,0 +1,1268 @@
+# /*##########################################################################
+#
+# The PyMca X-Ray Fluorescence Toolkit
+#
+# Copyright (c) 2004-2015 European Synchrotron Radiation Facility
+#
+# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
+# the ESRF by the Software group.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# ############################################################################*/
+__author__ = "V.A. Sole - ESRF Data Analysis"
+__contact__ = "sole@esrf.fr"
+__license__ = "MIT"
+__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
+
+import sys
+import os
+import struct
+import numpy
+
+DEBUG = 0
+ALLOW_MULTIPLE_STRIPS = False
+
+TAG_ID = { 256:"NumberOfColumns", # S or L ImageWidth
+ 257:"NumberOfRows", # S or L ImageHeight
+ 258:"BitsPerSample", # S Number of bits per component
+ 259:"Compression", # SHORT (1 - NoCompression, ...
+ 262:"PhotometricInterpretation", # SHORT (0 - WhiteIsZero, 1 -BlackIsZero, 2 - RGB, 3 - Palette color
+ 270:"ImageDescription", # ASCII
+ 273:"StripOffsets", # S or L, for each strip, the byte offset of the strip
+ 277:"SamplesPerPixel", # SHORT (>=3) only for RGB images
+ 278:"RowsPerStrip", # S or L, number of rows in each back may be not for the last
+ 279:"StripByteCounts", # S or L, The number of bytes in the strip AFTER any compression
+ 305:"Software", # ASCII
+ 306:"Date", # ASCII
+ 320:"Colormap", # Colormap of Palette-color Images
+ 339:"SampleFormat", # SHORT Interpretation of data in each pixel
+ }
+
+#TILES ARE TO BE SUPPORTED TOO ...
+TAG_NUMBER_OF_COLUMNS = 256
+TAG_NUMBER_OF_ROWS = 257
+TAG_BITS_PER_SAMPLE = 258
+TAG_PHOTOMETRIC_INTERPRETATION = 262
+TAG_COMPRESSION = 259
+TAG_IMAGE_DESCRIPTION = 270
+TAG_STRIP_OFFSETS = 273
+TAG_SAMPLES_PER_PIXEL = 277
+TAG_ROWS_PER_STRIP = 278
+TAG_STRIP_BYTE_COUNTS = 279
+TAG_SOFTWARE = 305
+TAG_DATE = 306
+TAG_COLORMAP = 320
+TAG_SAMPLE_FORMAT = 339
+
+FIELD_TYPE = {1:('BYTE', "B"),
+ 2:('ASCII', "s"), #string ending with binary zero
+ 3:('SHORT', "H"),
+ 4:('LONG', "I"),
+ 5:('RATIONAL',"II"),
+ 6:('SBYTE', "b"),
+ 7:('UNDEFINED',"B"),
+ 8:('SSHORT', "h"),
+ 9:('SLONG', "i"),
+ 10:('SRATIONAL',"ii"),
+ 11:('FLOAT', "f"),
+ 12:('DOUBLE', "d")}
+
+FIELD_TYPE_OUT = { 'B': 1,
+ 's': 2,
+ 'H': 3,
+ 'I': 4,
+ 'II': 5,
+ 'b': 6,
+ 'h': 8,
+ 'i': 9,
+ 'ii': 10,
+ 'f': 11,
+ 'd': 12}
+
+#sample formats (http://www.awaresystems.be/imaging/tiff/tiffflags/sampleformat.html)
+SAMPLE_FORMAT_UINT = 1
+SAMPLE_FORMAT_INT = 2
+SAMPLE_FORMAT_FLOAT = 3 #floating point
+SAMPLE_FORMAT_VOID = 4 #undefined data, usually assumed UINT
+SAMPLE_FORMAT_COMPLEXINT = 5
+SAMPLE_FORMAT_COMPLEXIEEEFP = 6
+
+
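+# Minimal usage sketch (illustrative; "image.tif" is a hypothetical name):
+#
+#     tiff = TiffIO("image.tif")
+#     nImages = tiff.getNumberOfImages()
+#     info = tiff.getInfo(0)    # dict with nRows, nColumns, nBits, ...
+#     data = tiff.getData(0)    # numpy array with the first image
+#     tiff.close()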
+
+class TiffIO(object):
+ def __init__(self, filename, mode=None, cache_length=20, mono_output=False):
+ if mode is None:
+ mode = 'rb'
+ if 'b' not in mode:
+ mode = mode + 'b'
+ if 'a' in mode.lower():
+ raise IOError("Mode %s makes no sense on TIFF files. Consider 'rb+'" % mode)
+ if ('w' in mode):
+ if '+' not in mode:
+ mode += '+'
+
+ if hasattr(filename, "seek") and\
+ hasattr(filename, "read"):
+ fd = filename
+ self._access = None
+ else:
+ #the b is needed for windows and python 3
+ fd = open(filename, mode)
+ self._access = mode
+
+ self._initInternalVariables(fd)
+ self._maxImageCacheLength = cache_length
+ self._forceMonoOutput = mono_output
+
+ def _initInternalVariables(self, fd=None):
+ if fd is None:
+ fd = self.fd
+ else:
+ self.fd = fd
+ # read the order
+ fd.seek(0)
+ order = fd.read(2).decode()
+ if len(order):
+ if order == "II":
+ #intel, little endian
+ fileOrder = "little"
+ self._structChar = '<'
+ elif order == "MM":
+                #motorola, big endian
+ fileOrder = "big"
+ self._structChar = '>'
+ else:
+ raise IOError("File is not a Mar CCD file, nor a TIFF file")
+ a = fd.read(2)
+ fortyTwo = struct.unpack(self._structChar+"H",a)[0]
+ if fortyTwo != 42:
+ raise IOError("Invalid TIFF version %d" % fortyTwo)
+ else:
+ if DEBUG:
+ print("VALID TIFF VERSION")
+ if sys.byteorder != fileOrder:
+ swap = True
+ else:
+ swap = False
+ else:
+ if sys.byteorder == "little":
+ self._structChar = '<'
+ else:
+ self._structChar = '>'
+ swap = False
+ self._swap = swap
+ self._IFD = []
+ self._imageDataCacheIndex = []
+ self._imageDataCache = []
+ self._imageInfoCacheIndex = []
+ self._imageInfoCache = []
+ self.getImageFileDirectories(fd)
+
+ def __makeSureFileIsOpen(self):
+ if not self.fd.closed:
+ return
+ if DEBUG:
+ print("Reopening closed file")
+ fileName = self.fd.name
+ if self._access is None:
+ #we do not own the file
+ #open in read mode
+ newFile = open(fileName,'rb')
+ else:
+ newFile = open(fileName, self._access)
+ self.fd = newFile
+
+ def __makeSureFileIsClosed(self):
+ if self._access is None:
+ #we do not own the file
+ if DEBUG:
+ print("Not closing not owned file")
+ return
+
+ if not self.fd.closed:
+ self.fd.close()
+
+ def close(self):
+ return self.__makeSureFileIsClosed()
+
+ def getNumberOfImages(self):
+        # refresh in case images were appended since the file was opened
+ self._updateIFD()
+ return len(self._IFD)
+
+ def _updateIFD(self):
+ self.__makeSureFileIsOpen()
+ self.getImageFileDirectories()
+ self.__makeSureFileIsClosed()
+
+ def getImageFileDirectories(self, fd=None):
+ if fd is None:
+ fd = self.fd
+ else:
+ self.fd = fd
+ st = self._structChar
+ fd.seek(4)
+ self._IFD = []
+ nImages = 0
+ fmt = st + 'I'
+ inStr = fd.read(struct.calcsize(fmt))
+ if not len(inStr):
+ offsetToIFD = 0
+ else:
+ offsetToIFD = struct.unpack(fmt, inStr)[0]
+ if DEBUG:
+ print("Offset to first IFD = %d" % offsetToIFD)
+ while offsetToIFD != 0:
+ self._IFD.append(offsetToIFD)
+ nImages += 1
+ fd.seek(offsetToIFD)
+ fmt = st + 'H'
+ numberOfDirectoryEntries = struct.unpack(fmt,fd.read(struct.calcsize(fmt)))[0]
+ if DEBUG:
+ print("Number of directory entries = %d" % numberOfDirectoryEntries)
+
+ fmt = st + 'I'
+ fd.seek(offsetToIFD + 2 + 12 * numberOfDirectoryEntries)
+ offsetToIFD = struct.unpack(fmt,fd.read(struct.calcsize(fmt)))[0]
+ if DEBUG:
+ print("Next Offset to IFD = %d" % offsetToIFD)
+ #offsetToIFD = 0
+ if DEBUG:
+ print("Number of images found = %d" % nImages)
+ return nImages
+
+ def _parseImageFileDirectory(self, nImage):
+ offsetToIFD = self._IFD[nImage]
+ st = self._structChar
+ fd = self.fd
+ fd.seek(offsetToIFD)
+ fmt = st + 'H'
+ numberOfDirectoryEntries = struct.unpack(fmt,fd.read(struct.calcsize(fmt)))[0]
+ if DEBUG:
+ print("Number of directory entries = %d" % numberOfDirectoryEntries)
+
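+        # each IFD entry is 12 bytes: tag (H), field type (H), value count (I)
+        # and 4 bytes holding either the value itself or an offset to it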
+ fmt = st + 'HHI4s'
+ tagIDList = []
+ fieldTypeList = []
+ nValuesList = []
+ valueOffsetList = []
+ for i in range(numberOfDirectoryEntries):
+ tagID, fieldType, nValues, valueOffset = struct.unpack(fmt, fd.read(12))
+ tagIDList.append(tagID)
+ fieldTypeList.append(fieldType)
+ nValuesList.append(nValues)
+ if nValues == 1:
+ ftype, vfmt = FIELD_TYPE[fieldType]
+ if ftype not in ['ASCII', 'RATIONAL', 'SRATIONAL']:
+ vfmt = st + vfmt
+ actualValue = struct.unpack(vfmt, valueOffset[0: struct.calcsize(vfmt)])[0]
+ valueOffsetList.append(actualValue)
+ else:
+ valueOffsetList.append(valueOffset)
+ elif (nValues < 5) and (fieldType == 2):
+ ftype, vfmt = FIELD_TYPE[fieldType]
+ vfmt = st + "%d%s" % (nValues,vfmt)
+ actualValue = struct.unpack(vfmt, valueOffset[0: struct.calcsize(vfmt)])[0]
+ valueOffsetList.append(actualValue)
+ else:
+ valueOffsetList.append(valueOffset)
+ if DEBUG:
+ if tagID in TAG_ID:
+ print("tagID = %s" % TAG_ID[tagID])
+ else:
+ print("tagID = %d" % tagID)
+ print("fieldType = %s" % FIELD_TYPE[fieldType][0])
+ print("nValues = %d" % nValues)
+ #if nValues == 1:
+ # print("valueOffset = %s" % valueOffset)
+ return tagIDList, fieldTypeList, nValuesList, valueOffsetList
+
+ def _readIFDEntry(self, tag, tagIDList, fieldTypeList, nValuesList, valueOffsetList):
+ fd = self.fd
+ st = self._structChar
+ idx = tagIDList.index(tag)
+ nValues = nValuesList[idx]
+ output = []
+ ftype, vfmt = FIELD_TYPE[fieldTypeList[idx]]
+ vfmt = st + "%d%s" % (nValues, vfmt)
+ requestedBytes = struct.calcsize(vfmt)
+ if nValues == 1:
+ output.append(valueOffsetList[idx])
+ elif requestedBytes < 5:
+ output.append(valueOffsetList[idx])
+ else:
+ fd.seek(struct.unpack(st+"I", valueOffsetList[idx])[0])
+ output = struct.unpack(vfmt, fd.read(requestedBytes))
+ return output
+
+ def getData(self, nImage, **kw):
+ if nImage >= len(self._IFD):
+            # refresh the IFD list before raising an IndexError
+ self._updateIFD()
+ return self._readImage(nImage, **kw)
+
+ def getImage(self, nImage):
+ return self.getData(nImage)
+
+ def getInfo(self, nImage, **kw):
+ if nImage >= len(self._IFD):
+            # refresh the IFD list before raising an IndexError
+ self._updateIFD()
+ # current = self._IFD[nImage]
+ return self._readInfo(nImage)
+
+ def _readInfo(self, nImage, close=True):
+ if nImage in self._imageInfoCacheIndex:
+ if DEBUG:
+ print("Reading info from cache")
+ return self._imageInfoCache[self._imageInfoCacheIndex.index(nImage)]
+
+ #read the header
+ self.__makeSureFileIsOpen()
+ tagIDList, fieldTypeList, nValuesList, valueOffsetList = self._parseImageFileDirectory(nImage)
+
+ #rows and columns
+ nColumns = valueOffsetList[tagIDList.index(TAG_NUMBER_OF_COLUMNS)]
+ nRows = valueOffsetList[tagIDList.index(TAG_NUMBER_OF_ROWS)]
+
+ #bits per sample
+ idx = tagIDList.index(TAG_BITS_PER_SAMPLE)
+ nBits = valueOffsetList[idx]
+ if nValuesList[idx] != 1:
+ #this happens with RGB and friends, nBits is not a single value
+ nBits = self._readIFDEntry(TAG_BITS_PER_SAMPLE,
+ tagIDList, fieldTypeList, nValuesList, valueOffsetList)
+
+
+ if TAG_COLORMAP in tagIDList:
+ idx = tagIDList.index(TAG_COLORMAP)
+ tmpColormap = self._readIFDEntry(TAG_COLORMAP,
+ tagIDList, fieldTypeList, nValuesList, valueOffsetList)
+ if max(tmpColormap) > 255:
+ tmpColormap = numpy.array(tmpColormap, dtype=numpy.uint16)
+ tmpColormap = (tmpColormap/256.).astype(numpy.uint8)
+ else:
+ tmpColormap = numpy.array(tmpColormap, dtype=numpy.uint8)
+ tmpColormap.shape = 3, -1
+ colormap = numpy.zeros((tmpColormap.shape[-1], 3), tmpColormap.dtype)
+ colormap[:,:] = tmpColormap.T
+ tmpColormap = None
+ else:
+ colormap = None
+
+ #sample format
+ if TAG_SAMPLE_FORMAT in tagIDList:
+ sampleFormat = valueOffsetList[tagIDList.index(TAG_SAMPLE_FORMAT)]
+ else:
+ #set to unknown
+ sampleFormat = SAMPLE_FORMAT_VOID
+
+ # compression
+ compression = False
+ compression_type = 1
+ if TAG_COMPRESSION in tagIDList:
+ compression_type = valueOffsetList[tagIDList.index(TAG_COMPRESSION)]
+ if compression_type == 1:
+ compression = False
+ else:
+ compression = True
+
+ #photometric interpretation
+ interpretation = 1
+ if TAG_PHOTOMETRIC_INTERPRETATION in tagIDList:
+ interpretation = valueOffsetList[tagIDList.index(TAG_PHOTOMETRIC_INTERPRETATION)]
+ else:
+ print("WARNING: Non standard TIFF. Photometric interpretation TAG missing")
+ helpString = ""
+ if sys.version > '2.6':
+ helpString = eval('b""')
+
+ if TAG_IMAGE_DESCRIPTION in tagIDList:
+ imageDescription = self._readIFDEntry(TAG_IMAGE_DESCRIPTION,
+ tagIDList, fieldTypeList, nValuesList, valueOffsetList)
+ if type(imageDescription) in [type([1]), type((1,))]:
+                imageDescription = helpString.join(imageDescription)
+ else:
+ imageDescription = "%d/%d" % (nImage+1, len(self._IFD))
+
+ if sys.version < '3.0':
+ defaultSoftware = "Unknown Software"
+ else:
+ defaultSoftware = bytes("Unknown Software",
+ encoding='utf-8')
+ if TAG_SOFTWARE in tagIDList:
+ software = self._readIFDEntry(TAG_SOFTWARE,
+ tagIDList, fieldTypeList, nValuesList, valueOffsetList)
+ if type(software) in [type([1]), type((1,))]:
+                software = helpString.join(software)
+ else:
+ software = defaultSoftware
+
+ if software == defaultSoftware:
+ try:
+ if sys.version < '3.0':
+ if imageDescription.upper().startswith("IMAGEJ"):
+ software = imageDescription.split("=")[0]
+ else:
+ tmpString = imageDescription.decode()
+ if tmpString.upper().startswith("IMAGEJ"):
+ software = bytes(tmpString.split("=")[0],
+ encoding='utf-8')
+ except:
+ pass
+
+ if TAG_DATE in tagIDList:
+ date = self._readIFDEntry(TAG_DATE,
+ tagIDList, fieldTypeList, nValuesList, valueOffsetList)
+ if type(date) in [type([1]), type((1,))]:
+                date = helpString.join(date)
+ else:
+ date = "Unknown Date"
+
+ stripOffsets = self._readIFDEntry(TAG_STRIP_OFFSETS,
+ tagIDList, fieldTypeList, nValuesList, valueOffsetList)
+ if TAG_ROWS_PER_STRIP in tagIDList:
+ rowsPerStrip = self._readIFDEntry(TAG_ROWS_PER_STRIP,
+ tagIDList, fieldTypeList, nValuesList, valueOffsetList)[0]
+ else:
+ rowsPerStrip = nRows
+ print("WARNING: Non standard TIFF. Rows per strip TAG missing")
+
+ if TAG_STRIP_BYTE_COUNTS in tagIDList:
+ stripByteCounts = self._readIFDEntry(TAG_STRIP_BYTE_COUNTS,
+ tagIDList, fieldTypeList, nValuesList, valueOffsetList)
+ else:
+ print("WARNING: Non standard TIFF. Strip byte counts TAG missing")
+ if hasattr(nBits, 'index'):
+ expectedSum = 0
+ for n in nBits:
+ expectedSum += int(nRows * nColumns * n / 8)
+ else:
+ expectedSum = int(nRows * nColumns * nBits / 8)
+ stripByteCounts = [expectedSum]
+
+ if close:
+ self.__makeSureFileIsClosed()
+
+ if self._forceMonoOutput and (interpretation > 1):
+ #color image but asked monochrome output
+ nBits = 32
+ colormap = None
+ sampleFormat = SAMPLE_FORMAT_FLOAT
+ interpretation = 1
+ #we cannot rely on any cache in this case
+ useInfoCache = False
+ if DEBUG:
+ print("FORCED MONO")
+ else:
+ useInfoCache = True
+
+ info = {}
+ info["nRows"] = nRows
+ info["nColumns"] = nColumns
+ info["nBits"] = nBits
+ info["compression"] = compression
+ info["compression_type"] = compression_type
+ info["imageDescription"] = imageDescription
+ info["stripOffsets"] = stripOffsets #This contains the file offsets to the data positions
+ info["rowsPerStrip"] = rowsPerStrip
+ info["stripByteCounts"] = stripByteCounts #bytes in strip since I do not support compression
+ info["software"] = software
+ info["date"] = date
+ info["colormap"] = colormap
+ info["sampleFormat"] = sampleFormat
+ info["photometricInterpretation"] = interpretation
+ infoDict = {}
+ if sys.version < '3.0':
+ testString = 'PyMca'
+ else:
+ testString = eval('b"PyMca"')
+ if software.startswith(testString):
+ #str to make sure python 2.x sees it as string and not unicode
+ if sys.version < '3.0':
+ descriptionString = imageDescription
+ else:
+ descriptionString = str(imageDescription.decode())
+ #interpret the image description in terms of supplied
+ #information at writing time
+ items = descriptionString.split('=')
+ for i in range(int(len(items)/2)):
+ key = "%s" % items[i*2]
+ #get rid of the \n at the end of the value
+ value = "%s" % items[i*2+1][:-1]
+ infoDict[key] = value
+ info['info'] = infoDict
+
+ if (self._maxImageCacheLength > 0) and useInfoCache:
+ self._imageInfoCacheIndex.insert(0,nImage)
+ self._imageInfoCache.insert(0, info)
+ if len(self._imageInfoCacheIndex) > self._maxImageCacheLength:
+ self._imageInfoCacheIndex = self._imageInfoCacheIndex[:self._maxImageCacheLength]
+ self._imageInfoCache = self._imageInfoCache[:self._maxImageCacheLength]
+ return info
+
+ def _readImage(self, nImage, **kw):
+ if DEBUG:
+ print("Reading image %d" % nImage)
+ if 'close' in kw:
+ close = kw['close']
+ else:
+ close = True
+ rowMin = kw.get('rowMin', None)
+ rowMax = kw.get('rowMax', None)
+ if nImage in self._imageDataCacheIndex:
+ if DEBUG:
+ print("Reading image data from cache")
+ return self._imageDataCache[self._imageDataCacheIndex.index(nImage)]
+
+ self.__makeSureFileIsOpen()
+ if self._forceMonoOutput:
+ oldMono = True
+ else:
+ oldMono = False
+ try:
+ self._forceMonoOutput = False
+ info = self._readInfo(nImage, close=False)
+ self._forceMonoOutput = oldMono
+ except:
+ self._forceMonoOutput = oldMono
+ raise
+ compression = info['compression']
+ compression_type = info['compression_type']
+ if compression:
+ if compression_type != 32773:
+                raise IOError("Compressed TIFF images not supported except PackBits")
+ else:
+ #PackBits compression
+ if DEBUG:
+ print("Using PackBits compression")
+
+ interpretation = info["photometricInterpretation"]
+ if interpretation == 2:
+ #RGB
+ pass
+ #raise IOError("RGB Image. Only grayscale images supported")
+ elif interpretation == 3:
+ #Palette Color Image
+ pass
+ #raise IOError("Palette-color Image. Only grayscale images supported")
+ elif interpretation > 2:
+            #any other interpretation (transparency mask, CMYK, YCbCr, ...)
+ raise IOError("Only grayscale images supported")
+
+ nRows = info["nRows"]
+ nColumns = info["nColumns"]
+ nBits = info["nBits"]
+ colormap = info["colormap"]
+ sampleFormat = info["sampleFormat"]
+
+ if rowMin is None:
+ rowMin = 0
+
+ if rowMax is None:
+ rowMax = nRows - 1
+
+        if rowMin < 0:
+            #negative indices count from the end of the image
+            rowMin = nRows + rowMin
+
+        if rowMax < 0:
+            rowMax = nRows + rowMax
+
+ if rowMax < rowMin:
+ txt = "Max Row smaller than Min Row. Reverse selection not supported"
+ raise NotImplementedError(txt)
+
+ if rowMin >= nRows:
+ raise IndexError("Image only has %d rows" % nRows)
+
+ if rowMax >= nRows:
+ raise IndexError("Image only has %d rows" % nRows)
+
+ if sampleFormat == SAMPLE_FORMAT_FLOAT:
+ if nBits == 32:
+ dtype = numpy.float32
+ elif nBits == 64:
+ dtype = numpy.float64
+ else:
+ raise ValueError("Unsupported number of bits for a float: %d" % nBits)
+ elif sampleFormat in [SAMPLE_FORMAT_UINT, SAMPLE_FORMAT_VOID]:
+ if nBits in [8, (8, 8, 8), [8, 8, 8]]:
+ dtype = numpy.uint8
+ elif nBits in [16, (16, 16, 16), [16, 16, 16]]:
+ dtype = numpy.uint16
+ elif nBits in [32, (32, 32, 32), [32, 32, 32]]:
+ dtype = numpy.uint32
+ elif nBits in [64, (64, 64, 64), [64, 64, 64]]:
+ dtype = numpy.uint64
+ else:
+ raise ValueError("Unsupported number of bits for unsigned int: %s" % (nBits,))
+ elif sampleFormat == SAMPLE_FORMAT_INT:
+ if nBits in [8, (8, 8, 8), [8, 8, 8]]:
+ dtype = numpy.int8
+ elif nBits in [16, (16, 16, 16), [16, 16, 16]]:
+ dtype = numpy.int16
+ elif nBits in [32, (32, 32, 32), [32, 32, 32]]:
+ dtype = numpy.int32
+ elif nBits in [64, (64, 64, 64), [64, 64, 64]]:
+ dtype = numpy.int64
+ else:
+ raise ValueError("Unsupported number of bits for signed int: %s" % (nBits,))
+ else:
+ raise ValueError("Unsupported combination. Bits = %s Format = %d" % (nBits, sampleFormat))
+ if hasattr(nBits, 'index'):
+ image = numpy.zeros((nRows, nColumns, len(nBits)), dtype=dtype)
+ elif colormap is not None:
+ #should I use colormap dtype?
+ image = numpy.zeros((nRows, nColumns, 3), dtype=dtype)
+ else:
+ image = numpy.zeros((nRows, nColumns), dtype=dtype)
+
+ fd = self.fd
+ st = self._structChar
+ stripOffsets = info["stripOffsets"] #This contains the file offsets to the data positions
+ rowsPerStrip = info["rowsPerStrip"]
+        stripByteCounts = info["stripByteCounts"] #number of bytes in each strip as stored in the file
+
+ rowStart = 0
+ if len(stripOffsets) == 1:
+ bytesPerRow = int(stripByteCounts[0]/rowsPerStrip)
+ if nRows == rowsPerStrip:
+ actualBytesPerRow = int(image.nbytes/nRows)
+ if actualBytesPerRow != bytesPerRow:
+ print("Warning: Bogus StripByteCounts information")
+ bytesPerRow = actualBytesPerRow
+ fd.seek(stripOffsets[0] + rowMin * bytesPerRow)
+ nBytes = (rowMax-rowMin+1) * bytesPerRow
+ if self._swap:
+ readout = numpy.fromstring(fd.read(nBytes), dtype).byteswap()
+ else:
+ readout = numpy.fromstring(fd.read(nBytes), dtype)
+ if hasattr(nBits, 'index'):
+ readout.shape = -1, nColumns, len(nBits)
+ elif info['colormap'] is not None:
+ readout = colormap[readout]
+ else:
+ readout.shape = -1, nColumns
+ image[rowMin:rowMax+1, :] = readout
+ else:
+ for i in range(len(stripOffsets)):
+ #the amount of rows
+ nRowsToRead = rowsPerStrip
+ rowEnd = int(min(rowStart+nRowsToRead, nRows))
+ if rowEnd < rowMin:
+ rowStart += nRowsToRead
+ continue
+ if (rowStart > rowMax):
+ break
+ #we are in position
+ fd.seek(stripOffsets[i])
+ #the amount of bytes to read
+ nBytes = stripByteCounts[i]
+ if compression_type == 32773:
+ try:
+ bufferBytes = bytes()
+ except:
+ #python 2.5 ...
+ bufferBytes = ""
+ #packBits
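+                    #PackBits run-length decoding: a control byte n is
+                    #followed by n+1 literal bytes when n >= 0, by one
+                    #byte to be repeated 1-n times when -127 <= n <= -1,
+                    #and n == -128 is a no-op that is simply skipped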
+ readBytes = 0
+ #intermediate buffer
+ tmpBuffer = fd.read(nBytes)
+ while readBytes < nBytes:
+ n = struct.unpack('b', tmpBuffer[readBytes:(readBytes+1)])[0]
+ readBytes += 1
+ if n >= 0:
+ #should I prevent reading more than the
+                            #length of the chain? Let Python raise
+ #the exception...
+ bufferBytes += tmpBuffer[readBytes:\
+ readBytes+(n+1)]
+ readBytes += (n+1)
+ elif n > -128:
+ bufferBytes += (-n+1) * tmpBuffer[readBytes:(readBytes+1)]
+ readBytes += 1
+ else:
+ #if read -128 ignore the byte
+ continue
+ if self._swap:
+ readout = numpy.fromstring(bufferBytes, dtype).byteswap()
+ else:
+ readout = numpy.fromstring(bufferBytes, dtype)
+ if hasattr(nBits, 'index'):
+ readout.shape = -1, nColumns, len(nBits)
+ elif info['colormap'] is not None:
+ readout = colormap[readout]
+ readout.shape = -1, nColumns, 3
+ else:
+ readout.shape = -1, nColumns
+ image[rowStart:rowEnd, :] = readout
+ else:
+ if 1:
+ #use numpy
+ if self._swap:
+ readout = numpy.fromstring(fd.read(nBytes), dtype).byteswap()
+ else:
+ readout = numpy.fromstring(fd.read(nBytes), dtype)
+ if hasattr(nBits, 'index'):
+ readout.shape = -1, nColumns, len(nBits)
+ elif colormap is not None:
+ readout = colormap[readout]
+ readout.shape = -1, nColumns, 3
+ else:
+ readout.shape = -1, nColumns
+ image[rowStart:rowEnd, :] = readout
+ else:
+ #using struct
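+                        #(unreachable: kept as a reference implementation)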
+ readout = numpy.array(struct.unpack(st+"%df" % int(nBytes/4), fd.read(nBytes)),
+ dtype=dtype)
+ if hasattr(nBits, 'index'):
+ readout.shape = -1, nColumns, len(nBits)
+ elif colormap is not None:
+ readout = colormap[readout]
+ readout.shape = -1, nColumns, 3
+ else:
+ readout.shape = -1, nColumns
+ image[rowStart:rowEnd, :] = readout
+ rowStart += nRowsToRead
+ if close:
+ self.__makeSureFileIsClosed()
+
+ if len(image.shape) == 3:
+ #color image
+ if self._forceMonoOutput:
+ #color image, convert to monochrome
+                #ITU-R 601 luma weights applied to (R, G, B)
+                image = (image[:,:,0] * 0.299 +\
+                         image[:,:,1] * 0.587 +\
+                         image[:,:,2] * 0.114).astype(numpy.float32)
+
+ if (rowMin == 0) and (rowMax == (nRows-1)):
+ self._imageDataCacheIndex.insert(0,nImage)
+ self._imageDataCache.insert(0, image)
+ if len(self._imageDataCacheIndex) > self._maxImageCacheLength:
+ self._imageDataCacheIndex = self._imageDataCacheIndex[:self._maxImageCacheLength]
+ self._imageDataCache = self._imageDataCache[:self._maxImageCacheLength]
+
+ return image
+
+ def writeImage(self, image0, info=None, software=None, date=None):
+ if software is None:
+ software = 'PyMca.TiffIO'
+ #if date is None:
+ # date = time.ctime()
+
+ self.__makeSureFileIsOpen()
+ fd = self.fd
+ #prior to do anything, perform some tests
+ if not len(image0.shape):
+ raise ValueError("Empty image")
+ if len(image0.shape) == 1:
+ #get a different view
+ image = image0[:]
+ image.shape = 1, -1
+ else:
+ image = image0
+
+ if image.dtype == numpy.float64:
+ image = image.astype(numpy.float32)
+ fd.seek(0)
+ mode = fd.mode
+ name = fd.name
+ if 'w' in mode:
+ #we have to overwrite the file
+ self.__makeSureFileIsClosed()
+ fd = None
+ if os.path.exists(name):
+ os.remove(name)
+ fd = open(name, mode='wb+')
+ self._initEmptyFile(fd)
+ self.fd = fd
+
+ #read the file size
+ self.__makeSureFileIsOpen()
+ fd = self.fd
+ fd.seek(0, os.SEEK_END)
+ endOfFile = fd.tell()
+ if fd.tell() == 0:
+ self._initEmptyFile(fd)
+ fd.seek(0, os.SEEK_END)
+ endOfFile = fd.tell()
+
+ #init internal variables
+ self._initInternalVariables(fd)
+ st = self._structChar
+
+ #get the image file directories
+ nImages = self.getImageFileDirectories()
+ if DEBUG:
+ print("File contains %d images" % nImages)
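+        #link the new image into the IFD chain: either the offset at byte 4
+        #(file without images) or the next-IFD pointer of the last directory
+        #must point to the new IFD written at the current end of the file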
+ if nImages == 0:
+ fd.seek(4)
+ fmt = st + 'I'
+ fd.write(struct.pack(fmt, endOfFile))
+ else:
+ fd.seek(self._IFD[-1])
+ fmt = st + 'H'
+ numberOfDirectoryEntries = struct.unpack(fmt,fd.read(struct.calcsize(fmt)))[0]
+ fmt = st + 'I'
+ pos = self._IFD[-1] + 2 + 12 * numberOfDirectoryEntries
+ fd.seek(pos)
+ fmt = st + 'I'
+ fd.write(struct.pack(fmt, endOfFile))
+ fd.flush()
+
+ #and we can write at the end of the file, find out the file length
+ fd.seek(0, os.SEEK_END)
+
+ #get the description information from the input information
+ if info is None:
+            description = None
+        else:
+            description = ""
+ for key in info.keys():
+ description += "%s=%s\n" % (key, info[key])
+
+ #get the image file directory
+ outputIFD = self._getOutputIFD(image, description=description,
+ software=software,
+ date=date)
+
+ #write the new IFD
+ fd.write(outputIFD)
+
+ #write the image
+ if self._swap:
+ fd.write(image.byteswap().tostring())
+ else:
+ fd.write(image.tostring())
+
+ fd.flush()
+        self.fd = fd
+ self.__makeSureFileIsClosed()
+
+ def _initEmptyFile(self, fd=None):
+ if fd is None:
+ fd = self.fd
+ if sys.byteorder == "little":
+ order = "II"
+ #intel, little endian
+ fileOrder = "little"
+ self._structChar = '<'
+ else:
+ order = "MM"
+            #motorola, big endian
+ fileOrder = "big"
+ self._structChar = '>'
+ st = self._structChar
+ if fileOrder == sys.byteorder:
+ self._swap = False
+ else:
+ self._swap = True
+ fd.seek(0)
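+        #a minimal TIFF header is 8 bytes: the byte-order mark ('II' or
+        #'MM'), the magic number 42 and the offset of the first IFD
+        #(0 here because the file does not contain any image yet)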
+ if sys.version < '3.0':
+ fd.write(struct.pack(st+'2s', order))
+ fd.write(struct.pack(st+'H', 42))
+ fd.write(struct.pack(st+'I', 0))
+ else:
+ fd.write(struct.pack(st+'2s', bytes(order,'utf-8')))
+ fd.write(struct.pack(st+'H', 42))
+ fd.write(struct.pack(st+'I', 0))
+ fd.flush()
+
+ def _getOutputIFD(self, image, description=None, software=None, date=None):
+ #the tags have to be in order
+ #the very minimum is
+ #256:"NumberOfColumns", # S or L ImageWidth
+ #257:"NumberOfRows", # S or L ImageHeight
+ #258:"BitsPerSample", # S Number of bits per component
+ #259:"Compression", # SHORT (1 - NoCompression, ...
+ #262:"PhotometricInterpretation", # SHORT (0 - WhiteIsZero, 1 -BlackIsZero, 2 - RGB, 3 - Palette color
+ #270:"ImageDescription", # ASCII
+ #273:"StripOffsets", # S or L, for each strip, the byte offset of the strip
+ #277:"SamplesPerPixel", # SHORT (>=3) only for RGB images
+        #278:"RowsPerStrip", # S or L, number of rows per strip (the last strip may have fewer)
+ #279:"StripByteCounts", # S or L, The number of bytes in the strip AFTER any compression
+ #305:"Software", # ASCII
+ #306:"Date", # ASCII
+ #339:"SampleFormat", # SHORT Interpretation of data in each pixel
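+        #
+        #nine of the tags above are always written; ImageDescription,
+        #Software, Date and SamplesPerPixel are optional and each one
+        #increments nDirectoryEntries below when present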
+
+ nDirectoryEntries = 9
+ imageDescription = None
+ if description is not None:
+ descriptionLength = len(description)
+ while descriptionLength < 4:
+ description = description + " "
+ descriptionLength = len(description)
+ if sys.version >= '3.0':
+ description = bytes(description, 'utf-8')
+ elif type(description) != type(""):
+ try:
+ description = description.decode('utf-8')
+ except UnicodeDecodeError:
+ try:
+ description = description.decode('latin-1')
+ except UnicodeDecodeError:
+ description = "%s" % description
+ if sys.version > '2.6':
+ description=description.encode('utf-8', errors="ignore")
+ description = "%s" % description
+ descriptionLength = len(description)
+ imageDescription = struct.pack("%ds" % descriptionLength, description)
+ nDirectoryEntries += 1
+
+ #software
+ if software is not None:
+ softwareLength = len(software)
+ while softwareLength < 4:
+ software = software + " "
+ softwareLength = len(software)
+ if sys.version >= '3.0':
+ software = bytes(software, 'utf-8')
+ softwarePackedString = struct.pack("%ds" % softwareLength, software)
+ nDirectoryEntries += 1
+ else:
+ softwareLength = 0
+
+ if date is not None:
+ dateLength = len(date)
+ if sys.version >= '3.0':
+ date = bytes(date, 'utf-8')
+ datePackedString = struct.pack("%ds" % dateLength, date)
+ dateLength = len(datePackedString)
+ nDirectoryEntries += 1
+ else:
+ dateLength = 0
+
+ if len(image.shape) == 2:
+ nRows, nColumns = image.shape
+ nChannels = 1
+ elif len(image.shape) == 3:
+ nRows, nColumns, nChannels = image.shape
+ else:
+ raise RuntimeError("Image does not have the right shape")
+ dtype = image.dtype
+ bitsPerSample = int(dtype.str[-1]) * 8
+
+ #only uncompressed data
+ compression = 1
+
+ #interpretation, black is zero
+ if nChannels == 1:
+ interpretation = 1
+ bitsPerSampleLength = 0
+ elif nChannels == 3:
+ interpretation = 2
+ bitsPerSampleLength = 3 * 2 # To store 3 shorts
+ nDirectoryEntries += 1 # For SamplesPerPixel
+ else:
+ raise RuntimeError(
+ "Image with %d color channel(s) not supported" % nChannels)
+
+ #image description
+ if imageDescription is not None:
+ descriptionLength = len(imageDescription)
+ else:
+ descriptionLength = 0
+
+ #strip offsets
+ #we are putting them after the directory and the directory is
+ #at the end of the file
+ self.fd.seek(0, os.SEEK_END)
+ endOfFile = self.fd.tell()
+ if endOfFile == 0:
+ #empty file
+ endOfFile = 8
+
+ #rows per strip
+ if ALLOW_MULTIPLE_STRIPS:
+ #try to segment the image in several pieces
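+            #e.g. a 100-row image is written as 4 strips of 25 rows each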
+ if not (nRows % 4):
+ rowsPerStrip = int(nRows/4)
+ elif not (nRows % 10):
+ rowsPerStrip = int(nRows/10)
+ elif not (nRows % 8):
+ rowsPerStrip = int(nRows/8)
+ elif not (nRows % 2):
+ rowsPerStrip = int(nRows/2)
+ else:
+ rowsPerStrip = nRows
+ else:
+ rowsPerStrip = nRows
+
+ #stripByteCounts
+ stripByteCounts = int(nColumns * rowsPerStrip *
+ bitsPerSample * nChannels / 8)
+
+ if descriptionLength > 4:
+ stripOffsets0 = endOfFile + dateLength + descriptionLength +\
+ 2 + 12 * nDirectoryEntries + 4
+ else:
+ stripOffsets0 = endOfFile + dateLength + \
+ 2 + 12 * nDirectoryEntries + 4
+
+ if softwareLength > 4:
+ stripOffsets0 += softwareLength
+
+ stripOffsets0 += bitsPerSampleLength
+
+ stripOffsets = [stripOffsets0]
+ stripOffsetsLength = 0
+ stripOffsetsString = None
+
+ st = self._structChar
+
+ if rowsPerStrip != nRows:
+ nStripOffsets = int(nRows/rowsPerStrip)
+ fmt = st + 'I'
+ stripOffsetsLength = struct.calcsize(fmt) * nStripOffsets
+ stripOffsets0 += stripOffsetsLength
+ #the length for the stripByteCounts will be the same
+ stripOffsets0 += stripOffsetsLength
+ stripOffsets = []
+ for i in range(nStripOffsets):
+ value = stripOffsets0 + i * stripByteCounts
+ stripOffsets.append(value)
+ if i == 0:
+ stripOffsetsString = struct.pack(fmt, value)
+ stripByteCountsString = struct.pack(fmt, stripByteCounts)
+ else:
+ stripOffsetsString += struct.pack(fmt, value)
+ stripByteCountsString += struct.pack(fmt, stripByteCounts)
+
+ if DEBUG:
+ print("IMAGE WILL START AT %d" % stripOffsets[0])
+
+ #sample format
+ if dtype in [numpy.float32, numpy.float64] or\
+ dtype.str[-2] == 'f':
+ sampleFormat = SAMPLE_FORMAT_FLOAT
+ elif dtype in [numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64]:
+ sampleFormat = SAMPLE_FORMAT_UINT
+ elif dtype in [numpy.int8, numpy.int16, numpy.int32, numpy.int64]:
+ sampleFormat = SAMPLE_FORMAT_INT
+ else:
+ raise ValueError("Unsupported data type %s" % dtype)
+
+ info = {}
+ info["nColumns"] = nColumns
+ info["nRows"] = nRows
+ info["nBits"] = bitsPerSample
+ info["compression"] = compression
+ info["photometricInterpretation"] = interpretation
+ info["stripOffsets"] = stripOffsets
+ if interpretation == 2:
+ info["samplesPerPixel"] = 3 # No support for extra samples
+ info["rowsPerStrip"] = rowsPerStrip
+ info["stripByteCounts"] = stripByteCounts
+ info["date"] = date
+ info["sampleFormat"] = sampleFormat
+
+ outputIFD = ""
+ if sys.version > '2.6':
+ outputIFD = eval('b""')
+
+ fmt = st + "H"
+ outputIFD += struct.pack(fmt, nDirectoryEntries)
+
+ fmt = st + "HHII"
+ outputIFD += struct.pack(fmt, TAG_NUMBER_OF_COLUMNS,
+ FIELD_TYPE_OUT['I'],
+ 1,
+ info["nColumns"])
+ outputIFD += struct.pack(fmt, TAG_NUMBER_OF_ROWS,
+ FIELD_TYPE_OUT['I'],
+ 1,
+ info["nRows"])
+
+ if info["photometricInterpretation"] == 1:
+ fmt = st + 'HHIHH'
+ outputIFD += struct.pack(fmt, TAG_BITS_PER_SAMPLE,
+ FIELD_TYPE_OUT['H'],
+ 1,
+ info["nBits"], 0)
+ elif info["photometricInterpretation"] == 2:
+ fmt = st + 'HHII'
+ outputIFD += struct.pack(fmt, TAG_BITS_PER_SAMPLE,
+ FIELD_TYPE_OUT['H'],
+ 3,
+ info["stripOffsets"][0] - \
+ 2 * stripOffsetsLength - \
+ descriptionLength - \
+ dateLength - \
+ softwareLength - \
+ bitsPerSampleLength)
+ else:
+ raise RuntimeError("Unsupported photometric interpretation")
+
+ fmt = st + 'HHIHH'
+ outputIFD += struct.pack(fmt, TAG_COMPRESSION,
+ FIELD_TYPE_OUT['H'],
+ 1,
+ info["compression"],0)
+ fmt = st + 'HHIHH'
+ outputIFD += struct.pack(fmt, TAG_PHOTOMETRIC_INTERPRETATION,
+ FIELD_TYPE_OUT['H'],
+ 1,
+ info["photometricInterpretation"],0)
+
+ if imageDescription is not None:
+ descriptionLength = len(imageDescription)
+ if descriptionLength > 4:
+ fmt = st + 'HHII'
+ outputIFD += struct.pack(fmt, TAG_IMAGE_DESCRIPTION,
+ FIELD_TYPE_OUT['s'],
+ descriptionLength,
+ info["stripOffsets"][0]-\
+ 2*stripOffsetsLength-\
+ descriptionLength)
+ else:
+ #it has to have length 4
+ fmt = st + 'HHI%ds' % descriptionLength
+ outputIFD += struct.pack(fmt, TAG_IMAGE_DESCRIPTION,
+ FIELD_TYPE_OUT['s'],
+ descriptionLength,
+ description)
+
+ if len(stripOffsets) == 1:
+ fmt = st + 'HHII'
+ outputIFD += struct.pack(fmt, TAG_STRIP_OFFSETS,
+ FIELD_TYPE_OUT['I'],
+ 1,
+ info["stripOffsets"][0])
+ else:
+ fmt = st + 'HHII'
+ outputIFD += struct.pack(fmt, TAG_STRIP_OFFSETS,
+ FIELD_TYPE_OUT['I'],
+ len(stripOffsets),
+ info["stripOffsets"][0]-2*stripOffsetsLength)
+
+ if info["photometricInterpretation"] == 2:
+ fmt = st + 'HHIHH'
+ outputIFD += struct.pack(fmt, TAG_SAMPLES_PER_PIXEL,
+ FIELD_TYPE_OUT['H'],
+ 1,
+ info["samplesPerPixel"], 0)
+
+ fmt = st + 'HHII'
+ outputIFD += struct.pack(fmt, TAG_ROWS_PER_STRIP,
+ FIELD_TYPE_OUT['I'],
+ 1,
+ info["rowsPerStrip"])
+
+ if len(stripOffsets) == 1:
+ fmt = st + 'HHII'
+ outputIFD += struct.pack(fmt, TAG_STRIP_BYTE_COUNTS,
+ FIELD_TYPE_OUT['I'],
+ 1,
+ info["stripByteCounts"])
+ else:
+ fmt = st + 'HHII'
+ outputIFD += struct.pack(fmt, TAG_STRIP_BYTE_COUNTS,
+ FIELD_TYPE_OUT['I'],
+ len(stripOffsets),
+ info["stripOffsets"][0]-stripOffsetsLength)
+
+ if software is not None:
+ if softwareLength > 4:
+ fmt = st + 'HHII'
+ outputIFD += struct.pack(fmt, TAG_SOFTWARE,
+ FIELD_TYPE_OUT['s'],
+ softwareLength,
+ info["stripOffsets"][0]-\
+ 2*stripOffsetsLength-\
+ descriptionLength-softwareLength-dateLength)
+ else:
+ #it has to have length 4
+ fmt = st + 'HHI%ds' % softwareLength
+ outputIFD += struct.pack(fmt, TAG_SOFTWARE,
+ FIELD_TYPE_OUT['s'],
+ softwareLength,
+ softwarePackedString)
+
+ if date is not None:
+ fmt = st + 'HHII'
+ outputIFD += struct.pack(fmt, TAG_DATE,
+ FIELD_TYPE_OUT['s'],
+ dateLength,
+ info["stripOffsets"][0]-\
+ 2*stripOffsetsLength-\
+ descriptionLength-dateLength)
+
+ fmt = st + 'HHIHH'
+ outputIFD += struct.pack(fmt, TAG_SAMPLE_FORMAT,
+ FIELD_TYPE_OUT['H'],
+ 1,
+ info["sampleFormat"],0)
+ fmt = st + 'I'
+ outputIFD += struct.pack(fmt, 0)
+
+ if info["photometricInterpretation"] == 2:
+ outputIFD += struct.pack('HHH', info["nBits"],
+ info["nBits"], info["nBits"])
+
+ if softwareLength > 4:
+ outputIFD += softwarePackedString
+
+ if date is not None:
+ outputIFD += datePackedString
+
+ if imageDescription is not None:
+ if descriptionLength > 4:
+ outputIFD += imageDescription
+
+ if stripOffsetsString is not None:
+ outputIFD += stripOffsetsString
+ outputIFD += stripByteCountsString
+
+ return outputIFD
+
+
+if __name__ == "__main__":
+ filename = sys.argv[1]
+ dtype = numpy.uint16
+ if not os.path.exists(filename):
+ print("Testing file creation")
+        tif = TiffIO(filename, mode='wb+')
+ data = numpy.arange(10000).astype(dtype)
+ data.shape = 100, 100
+ tif.writeImage(data, info={'Title':'1st'})
+ tif = None
+ if os.path.exists(filename):
+ print("Testing image appending")
+        tif = TiffIO(filename, mode='rb+')
+ tif.writeImage((data*2).astype(dtype), info={'Title':'2nd'})
+ tif = None
+ tif = TiffIO(filename)
+ print("Number of images = %d" % tif.getNumberOfImages())
+ for i in range(tif.getNumberOfImages()):
+ info = tif.getInfo(i)
+ for key in info:
+ if key not in ["colormap"]:
+ print("%s = %s" % (key, info[key]))
+ elif info['colormap'] is not None:
+ print("RED %s = %s" % (key, info[key][0:10, 0]))
+ print("GREEN %s = %s" % (key, info[key][0:10, 1]))
+ print("BLUE %s = %s" % (key, info[key][0:10, 2]))
+ data = tif.getImage(i)[0, 0:10]
+ print("data [0, 0:10] = ", data)
+
diff --git a/silx/third_party/__init__.py b/silx/third_party/__init__.py
new file mode 100644
index 0000000..5f31822
--- /dev/null
+++ b/silx/third_party/__init__.py
@@ -0,0 +1,28 @@
+# coding: utf-8
+# /*##########################################################################
+#
+# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# ###########################################################################*/
+
+__authors__ = ["Jérôme Kieffer"]
+__license__ = "MIT"
+__date__ = "09/10/2015"
diff --git a/silx/third_party/_local/__init__.py b/silx/third_party/_local/__init__.py
new file mode 100644
index 0000000..03973e5
--- /dev/null
+++ b/silx/third_party/_local/__init__.py
@@ -0,0 +1,36 @@
+# coding: utf-8
+# /*##########################################################################
+#
+# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# ###########################################################################*/
+"""
+Package containing external modules that are also available as standalone
+Python libraries.
+
+They are stored here to reduce Python library dependencies.
+
+This package can be removed if all dependencies are available on the target
+system as Python libraries.
+"""
+__authors__ = ["Valentin Valls"]
+__license__ = "MIT"
+__date__ = "26/04/2017"
diff --git a/silx/third_party/_local/six.py b/silx/third_party/_local/six.py
new file mode 100644
index 0000000..190c023
--- /dev/null
+++ b/silx/third_party/_local/six.py
@@ -0,0 +1,868 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP 302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3.
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+        Return true if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+        Required if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_("""def raise_from(value, from_value):
+ if from_value is None:
+ raise value
+ raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ raise value from from_value
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta):
+
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
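+# Hypothetical usage sketch ('Meta' and 'Base' are illustrative names):
+#
+#     class MyClass(with_metaclass(Meta, Base)):
+#         pass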
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
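+# Hypothetical usage sketch ('Meta' is an illustrative name):
+#
+#     @add_metaclass(Meta)
+#     class MyClass(object):
+#         pass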
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/silx/third_party/setup.py b/silx/third_party/setup.py
new file mode 100644
index 0000000..7477456
--- /dev/null
+++ b/silx/third_party/setup.py
@@ -0,0 +1,48 @@
+# coding: ascii
+#
+# JK: numpy.distutils, which imports this file, does not handle utf-8 with numpy < 1.12
+#
+# /*##########################################################################
+#
+# Copyright (c) 2016 European Synchrotron Radiation Facility
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# ###########################################################################*/
+
+__authors__ = ["Valentin Valls"]
+__license__ = "MIT"
+__date__ = "26/04/2017"
+
+import os
+from numpy.distutils.misc_util import Configuration
+
+
+def configuration(parent_package='', top_path=None):
+ config = Configuration('third_party', parent_package, top_path)
+    # include _local only if it is available
+ local_path = os.path.join(top_path, parent_package, "third_party", "_local")
+ if os.path.exists(local_path):
+ config.add_subpackage('_local')
+ return config
+
+
+if __name__ == "__main__":
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/silx/third_party/six.py b/silx/third_party/six.py
new file mode 100644
index 0000000..a1fe786
--- /dev/null
+++ b/silx/third_party/six.py
@@ -0,0 +1,49 @@
+# coding: utf-8
+# /*##########################################################################
+#
+# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#
+# ###########################################################################*/
+"""Wrapper module for the `six` library.
+
+This module exposes the local silx copy of `six` when it is available.
+Otherwise it expects a `six` library installed in the Python path.
+
+It should be used like this:
+
+.. code-block::
+
+ from silx.third_party import six
+
+"""
+
+from __future__ import absolute_import
+
+__authors__ = ["Valentin Valls"]
+__license__ = "MIT"
+__date__ = "26/04/2017"
+
+try:
+ # try to import our local version of six
+ from ._local.six import * # noqa
+except ImportError:
+ # else try to import it from the python path
+ from six import * # noqa