author     David Bremner <bremner@debian.org>  2018-12-25 22:44:44 +0900
committer  David Bremner <bremner@debian.org>  2018-12-25 22:44:44 +0900
commit     33cc53ba511843ac9857470e74e043013d3620fe (patch)
tree       ebc0e94b5486710fc2f381d92fab9a594f1dc62c /src/external/rawspeed/src/librawspeed/decompressors
parent     1fddb41abdd4ca3be5bfdfe019e126b188879e15 (diff)
Importing darktable_2.6.0.orig.tar.xz
Diffstat (limited to 'src/external/rawspeed/src/librawspeed/decompressors')
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/AbstractDngDecompressor.cpp | 22
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/AbstractDngDecompressor.h | 1
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/AbstractHuffmanTable.h | 247
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/AbstractLJpegDecompressor.cpp | 21
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/AbstractLJpegDecompressor.h | 19
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/AbstractParallelizedDecompressor.h | 2
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/BinaryHuffmanTree.h | 237
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/CMakeLists.txt | 12
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/Cr2Decompressor.cpp | 15
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/Cr2Decompressor.h | 7
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/CrwDecompressor.cpp | 48
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/CrwDecompressor.h | 4
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/DeflateDecompressor.cpp | 16
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/DeflateDecompressor.h | 13
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/FujiDecompressor.cpp | 148
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/FujiDecompressor.h | 43
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/HasselbladDecompressor.cpp | 8
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/HasselbladDecompressor.h | 3
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/HuffmanTable.h | 351
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableLUT.h | 257
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableLookup.h | 171
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableTree.h | 165
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableVector.h | 155
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/JpegDecompressor.cpp | 15
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/JpegDecompressor.h | 3
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/KodakDecompressor.cpp | 26
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/KodakDecompressor.h | 5
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/LJpegDecompressor.cpp | 107
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/LJpegDecompressor.h | 10
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/NikonDecompressor.cpp | 434
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/NikonDecompressor.h | 33
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/OlympusDecompressor.cpp | 49
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/OlympusDecompressor.h | 3
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressor.cpp | 292
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressor.h | 55
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressorV5.cpp | 255
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressorV5.h | 109
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/PentaxDecompressor.cpp | 28
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/PentaxDecompressor.h | 8
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/PhaseOneDecompressor.cpp | 152
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/PhaseOneDecompressor.h | 55
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/SamsungV0Decompressor.cpp | 11
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/SamsungV0Decompressor.h | 2
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/SamsungV1Decompressor.cpp | 21
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/SamsungV2Decompressor.cpp | 14
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/SonyArw1Decompressor.cpp | 15
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/SonyArw1Decompressor.h | 3
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/SonyArw2Decompressor.cpp | 6
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/UncompressedDecompressor.h | 2
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/VC5Decompressor.cpp | 829
-rw-r--r--  src/external/rawspeed/src/librawspeed/decompressors/VC5Decompressor.h | 229
51 files changed, 3902 insertions(+), 834 deletions(-)
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/AbstractDngDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/AbstractDngDecompressor.cpp
index 8cfe1e17b..18173edc5 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/AbstractDngDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/AbstractDngDecompressor.cpp
@@ -20,7 +20,7 @@
#include "rawspeedconfig.h" // for HAVE_JPEG, HAVE_...
#include "decompressors/AbstractDngDecompressor.h"
-#include "common/Common.h" // for BitOrder::BitOrd...
+#include "common/Common.h" // for BitOrder_LSB
#include "common/Point.h" // for iPoint2D
#include "common/RawImage.h" // for RawImageData
#include "decoders/RawDecoderException.h" // for RawDecoderException
@@ -28,11 +28,10 @@
#include "decompressors/JpegDecompressor.h" // for JpegDecompressor
#include "decompressors/LJpegDecompressor.h" // for LJpegDecompressor
#include "decompressors/UncompressedDecompressor.h" // for UncompressedDeco...
-#include "io/Buffer.h" // for Buffer (ptr only)
+#include "decompressors/VC5Decompressor.h" // for VC5Decompressor
#include "io/ByteStream.h" // for ByteStream
#include "io/Endianness.h" // for Endianness, Endi...
-#include "io/IOException.h" // for IOException
-#include "tiff/TiffIFD.h" // for getTiffByteOrder
+#include "io/IOException.h" // for IOException, Thr...
#include <cassert> // for assert
#include <cstdio> // for size_t
#include <limits> // for numeric_limits
@@ -111,7 +110,7 @@ void AbstractDngDecompressor::decompressThreaded(
/* Deflate compression */
} else if (compression == 8) {
#ifdef HAVE_ZLIB
- std::unique_ptr<unsigned char[]> uBuffer;
+ std::unique_ptr<unsigned char[]> uBuffer; // NOLINT
for (size_t i = t->start; i < t->end && i < slices.size(); i++) {
auto e = &slices[i];
@@ -130,6 +129,19 @@ void AbstractDngDecompressor::decompressThreaded(
"ZLIB is not present! Deflate compression will not be supported!"
ThrowRDE("deflate support is disabled.");
#endif
+ /* VC-5 */
+ } else if (compression == 9) {
+ for (size_t i = t->start; i < t->end && i < slices.size(); i++) {
+ auto e = &slices[i];
+ VC5Decompressor d(e->bs, mRaw);
+ try {
+ d.decode(e->offX, e->offY, e->width, e->height);
+ } catch (RawDecoderException& err) {
+ mRaw->setError(err.what());
+ } catch (IOException& err) {
+ mRaw->setError(err.what());
+ }
+ }
/* Lossy DNG */
} else if (compression == 0x884c) {
#ifdef HAVE_JPEG
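
The dispatch in AbstractDngDecompressor::decompressThreaded() keys off the DNG Compression tag: 8 selects the deflate (zlib) path, the new value 9 selects the VC5Decompressor added by this import, and 0x884c selects the lossy-JPEG path. A minimal sketch of the per-slice pattern the new VC-5 branch follows (an illustration based on the hunk above, not the exact upstream code):

  // Sketch: decode each slice independently; a failure in one slice is
  // recorded on the image and the remaining slices are still attempted.
  for (size_t i = t->start; i < t->end && i < slices.size(); i++) {
    auto e = &slices[i];                  // one tile/slice of the DNG
    VC5Decompressor d(e->bs, mRaw);       // fresh decompressor per slice
    try {
      d.decode(e->offX, e->offY, e->width, e->height);
    } catch (RawDecoderException& err) {
      mRaw->setError(err.what());         // record, keep decoding the rest
    } catch (IOException& err) {
      mRaw->setError(err.what());
    }
  }
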
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/AbstractDngDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/AbstractDngDecompressor.h
index 44b6545ea..626ffeaa6 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/AbstractDngDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/AbstractDngDecompressor.h
@@ -21,6 +21,7 @@
#pragma once
#include "common/Common.h" // for uint32
+#include "common/Point.h" // for iPoint2D
#include "decompressors/AbstractParallelizedDecompressor.h" // for Abstract...
#include "io/ByteStream.h" // for ByteStream
#include <cassert> // for assert
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/AbstractHuffmanTable.h b/src/external/rawspeed/src/librawspeed/decompressors/AbstractHuffmanTable.h
new file mode 100644
index 000000000..a62c1e9a3
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/AbstractHuffmanTable.h
@@ -0,0 +1,247 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2017 Axel Waggershauser
+ Copyright (C) 2017-2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#pragma once
+
+#include "common/Common.h" // for uchar8, uint32, ushort16
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "io/Buffer.h" // for Buffer
+#include <algorithm> // for copy, adjacent_find, max_e...
+#include <cassert> // for assert
+#include <cstddef> // for size_t
+#include <functional> // for less, less_equal
+#include <iterator> // for back_insert_iterator, back...
+#include <numeric> // for accumulate
+#include <vector> // for vector, operator==
+
+namespace rawspeed {
+
+class AbstractHuffmanTable {
+public:
+ struct CodeSymbol final {
+ ushort16 code; // the code (bit pattern found inside the stream)
+ uchar8 code_len; // the code length in bits, valid values are 1..16
+
+ CodeSymbol() = default;
+
+ CodeSymbol(ushort16 code_, uchar8 code_len_)
+ : code(code_), code_len(code_len_) {
+ assert(code_len > 0);
+ assert(code_len <= 16);
+ assert(code <= ((1U << code_len) - 1U));
+ }
+
+ static bool HaveCommonPrefix(const CodeSymbol& symbol,
+ const CodeSymbol& partial) {
+ assert(partial.code_len <= symbol.code_len);
+
+ auto getNHighBits = [](const CodeSymbol& s, unsigned bits) -> ushort16 {
+ const auto shift = s.code_len - bits;
+ return s.code >> shift;
+ };
+
+ const auto s0 = getNHighBits(symbol, partial.code_len);
+ const auto s1 = partial.code;
+
+ return s0 == s1;
+ }
+ };
+
+protected:
+ inline size_t __attribute__((pure)) maxCodePlusDiffLength() const {
+ return nCodesPerLength.size() - 1 +
+ *(std::max_element(codeValues.cbegin(), codeValues.cend()));
+ }
+
+ // These two fields directly represent the contents of a JPEG DHT field
+
+ // 1. The number of codes there are per bit length, this is index 1 based.
+ // (there are always 0 codes of length 0)
+ std::vector<unsigned int> nCodesPerLength; // index is length of code
+
+ inline unsigned int __attribute__((pure)) maxCodesCount() const {
+ return std::accumulate(nCodesPerLength.begin(), nCodesPerLength.end(), 0U);
+ }
+
+ // 2. This is the actual huffman encoded data, i.e. the 'alphabet'. Each value
+ // is the number of bits following the code that encode the difference to the
+ // last pixel. Valid values are in the range 0..16.
+ // signExtended() is used to decode the difference bits to a signed int.
+ std::vector<uchar8> codeValues; // index is just sequential number
+
+ static void VerifyCodeSymbols(const std::vector<CodeSymbol>& symbols) {
+#ifndef NDEBUG
+ // The code symbols are ordered so that all the code values are strictly
+ // increasing and code lengths are not decreasing.
+ const auto symbolSort = [](const CodeSymbol& lhs,
+ const CodeSymbol& rhs) -> bool {
+ return std::less<>()(lhs.code, rhs.code) &&
+ std::less_equal<>()(lhs.code_len, rhs.code_len);
+ };
+#endif
+ assert(std::adjacent_find(symbols.cbegin(), symbols.cend(),
+ [&symbolSort](const CodeSymbol& lhs,
+ const CodeSymbol& rhs) -> bool {
+ return !symbolSort(lhs, rhs);
+ }) == symbols.cend() &&
+ "all code symbols are globally ordered");
+
+ // No two symbols should have the same prefix (high bytes)
+ // Only analyze the lower triangular matrix, excluding diagonal
+ for (auto sId = 0UL; sId < symbols.size(); sId++) {
+ for (auto pId = 0UL; pId < sId; pId++)
+ assert(!CodeSymbol::HaveCommonPrefix(symbols[sId], symbols[pId]));
+ }
+ }
+
+ std::vector<CodeSymbol> generateCodeSymbols() const {
+ std::vector<CodeSymbol> symbols;
+
+ assert(!nCodesPerLength.empty());
+ assert(maxCodesCount() > 0);
+
+ const auto maxCodeLength = nCodesPerLength.size() - 1U;
+ assert(codeValues.size() == maxCodesCount());
+
+ // reserve all the memory. avoids lots of small allocs
+ symbols.reserve(maxCodesCount());
+
+ // Figure C.1: make table of Huffman code length for each symbol
+ // Figure C.2: generate the codes themselves
+ uint32 code = 0;
+ for (unsigned int l = 1; l <= maxCodeLength; ++l) {
+ for (unsigned int i = 0; i < nCodesPerLength[l]; ++i) {
+ assert(code <= 0xffff);
+
+ symbols.emplace_back(code, l);
+ code++;
+ }
+
+ code <<= 1;
+ }
+
+ assert(symbols.size() == maxCodesCount());
+ VerifyCodeSymbols(symbols);
+
+ return symbols;
+ }
+
+public:
+ bool operator==(const AbstractHuffmanTable& other) const {
+ return nCodesPerLength == other.nCodesPerLength &&
+ codeValues == other.codeValues;
+ }
+
+ uint32 setNCodesPerLength(const Buffer& data) {
+ assert(data.getSize() == 16);
+
+ nCodesPerLength.resize(17, 0);
+ std::copy(data.begin(), data.end(), &nCodesPerLength[1]);
+ assert(nCodesPerLength[0] == 0);
+
+ // trim empty entries from the codes per length table on the right
+ while (!nCodesPerLength.empty() && nCodesPerLength.back() == 0)
+ nCodesPerLength.pop_back();
+
+ if (nCodesPerLength.empty())
+ ThrowRDE("Codes-per-length table is empty");
+
+ assert(nCodesPerLength.back() > 0);
+
+ const auto count = maxCodesCount();
+ assert(count > 0);
+
+ if (count > 162)
+ ThrowRDE("Too big code-values table");
+
+ // We are at the Root node, len is 1, there are two possible child Nodes
+ unsigned maxCodes = 2;
+
+ for (auto codeLen = 1UL; codeLen < nCodesPerLength.size(); codeLen++) {
+ // we have codeLen bits. make sure that this code count can actually fit
+ // E.g. for len 1 we could have two codes: 0b0 and 0b1
+ // (but in that case there can be no other codes (with higher lengths))
+ const auto maxCodesInCurrLen = (1U << codeLen);
+ const auto nCodes = nCodesPerLength[codeLen];
+ if (nCodes > maxCodesInCurrLen) {
+ ThrowRDE("Corrupt Huffman. Can never have %u codes in %lu-bit len",
+ nCodes, codeLen);
+ }
+
+ // Also, check that we can actually have this many leafs for this length
+ if (nCodes > maxCodes) {
+ ThrowRDE(
+ "Corrupt Huffman. Can only fit %u out of %u codes in %lu-bit len",
+ maxCodes, nCodes, codeLen);
+ }
+
+ // There are nCodes leafs on this level, and those can not be branches
+ maxCodes -= nCodes;
+ // On the next level, rest can be branches, and can have two child Nodes
+ maxCodes *= 2;
+ }
+
+ return count;
+ }
+
+ void setCodeValues(const Buffer& data) {
+ // spec says max 16 but Hasselblad ignores that -> allow 17
+ // Canon's old CRW really ignores this ...
+ assert(data.getSize() <= 162);
+ assert(data.getSize() == maxCodesCount());
+
+ codeValues.clear();
+ codeValues.reserve(maxCodesCount());
+ std::copy(data.begin(), data.end(), std::back_inserter(codeValues));
+ assert(codeValues.size() == maxCodesCount());
+
+ for (const auto cValue : codeValues) {
+ if (cValue > 16)
+ ThrowRDE("Corrupt Huffman. Code value %u is bigger than 16", cValue);
+ }
+ }
+
+ // WARNING: the caller should check that len != 0 before calling the function
+ inline static int __attribute__((const))
+ signExtended(uint32 diff, uint32 len) {
+ int32 ret = diff;
+#if 0
+#define _X(x) (1 << x) - 1
+ constexpr static int offset[16] = {
+ 0, _X(1), _X(2), _X(3), _X(4), _X(5), _X(6), _X(7),
+ _X(8), _X(9), _X(10), _X(11), _X(12), _X(13), _X(14), _X(15)};
+#undef _X
+ if ((diff & (1 << (len - 1))) == 0)
+ ret -= offset[len];
+#else
+ if ((diff & (1 << (len - 1))) == 0)
+ ret -= (1 << len) - 1;
+#endif
+ return ret;
+ }
+};
+
+inline bool operator==(const AbstractHuffmanTable::CodeSymbol& lhs,
+ const AbstractHuffmanTable::CodeSymbol& rhs) {
+ return lhs.code == rhs.code && lhs.code_len == rhs.code_len;
+}
+
+} // namespace rawspeed
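
The comments in AbstractHuffmanTable.h describe two ideas worth a standalone illustration: canonical code generation from the DHT codes-per-length histogram (JPEG Figures C.1/C.2, mirrored by generateCodeSymbols()), and the sign extension of a len-bit difference (signExtended()). A small self-contained sketch, using a hypothetical 4-symbol histogram:

  #include <cstdint>
  #include <cstdio>
  #include <utility>
  #include <vector>

  // Canonical code generation, as in generateCodeSymbols(): within one bit
  // length the codes are consecutive integers; stepping to the next length
  // shifts the running code left by one bit.
  static std::vector<std::pair<uint16_t, unsigned>>
  makeCanonicalCodes(const std::vector<unsigned>& nCodesPerLength) {
    std::vector<std::pair<uint16_t, unsigned>> symbols; // (code, code length)
    uint32_t code = 0;
    for (unsigned len = 1; len < nCodesPerLength.size(); ++len) {
      for (unsigned i = 0; i < nCodesPerLength[len]; ++i)
        symbols.emplace_back(static_cast<uint16_t>(code++), len);
      code <<= 1;
    }
    return symbols;
  }

  // Same mapping as signExtended(): a len-bit difference whose top bit is 0
  // encodes a negative value, offset by (2^len - 1).
  static int signExtend(uint32_t diff, uint32_t len) {
    int ret = static_cast<int>(diff);
    if ((diff & (1u << (len - 1))) == 0)
      ret -= (1 << len) - 1;
    return ret;
  }

  int main() {
    // Hypothetical DHT histogram: one 1-bit code, one 2-bit code, two 3-bit
    // codes. Canonical codes come out as 0b0, 0b10, 0b110, 0b111.
    const std::vector<unsigned> nCodesPerLength = {0, 1, 1, 2};
    for (const auto& s : makeCanonicalCodes(nCodesPerLength))
      std::printf("len %u  code 0x%x\n", s.second, static_cast<unsigned>(s.first));
    std::printf("signExtend(0b01, 2) = %d\n", signExtend(0b01, 2)); // -> -2
    return 0;
  }
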
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/AbstractLJpegDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/AbstractLJpegDecompressor.cpp
index 442b6c9a7..a56c3056d 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/AbstractLJpegDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/AbstractLJpegDecompressor.cpp
@@ -21,15 +21,18 @@
*/
#include "decompressors/AbstractLJpegDecompressor.h"
-#include "common/Common.h" // for uint32, make_unique, uchar8
-#include "common/Point.h" // for iPoint2D
-#include "decoders/RawDecoderException.h" // for ThrowRDE
-#include "decompressors/HuffmanTable.h" // for HuffmanTable
-#include "io/ByteStream.h" // for ByteStream
-#include <array> // for array
-#include <memory> // for unique_ptr, allocator
-#include <utility> // for move
-#include <vector> // for vector
+#include "common/Common.h" // for uint32, uchar8
+#include "common/Point.h" // for iPoint2D
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "decompressors/AbstractHuffmanTable.h" // for AbstractHuffmanTable
+#include "decompressors/HuffmanTable.h" // for HuffmanTable, Huffma...
+#include "io/ByteStream.h" // for ByteStream
+#include "io/Endianness.h" // for Endianness, Endianne...
+#include <array> // for array
+#include <cassert> // for assert
+#include <memory> // for unique_ptr, make_unique
+#include <utility> // for move
+#include <vector> // for vector
namespace rawspeed {
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/AbstractLJpegDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/AbstractLJpegDecompressor.h
index 74a7941af..53a6d8d46 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/AbstractLJpegDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/AbstractLJpegDecompressor.h
@@ -26,13 +26,10 @@
#include "decoders/RawDecoderException.h" // for ThrowRDE
#include "decompressors/AbstractDecompressor.h" // for AbstractDecompressor
#include "decompressors/HuffmanTable.h" // for HuffmanTable
-#include "io/Buffer.h" // for Buffer, Buffer::size_type
#include "io/ByteStream.h" // for ByteStream
-#include "io/Endianness.h" // for getHostEndianness, Endiannes...
-#include <array> // for array
-#include <memory> // for unique_ptr
-#include <utility> // for move
-#include <vector> // for vector
+#include <array> // for array
+#include <memory> // for unique_ptr
+#include <vector> // for vector
/*
* The following enum and two structs are stolen from the IJG JPEG library
@@ -118,20 +115,20 @@ struct JpegComponentInfo {
* These values are fixed over the whole image.
* They are read from the SOF marker.
*/
- uint32 componentId = -1; /* identifier for this component (0..255) */
+ uint32 componentId = ~0U; /* identifier for this component (0..255) */
/*
* Huffman table selector (0..3). The value may vary
* between scans. It is read from the SOS marker.
*/
- uint32 dcTblNo = -1;
- uint32 superH = -1; // Horizontal Supersampling
- uint32 superV = -1; // Vertical Supersampling
+ uint32 dcTblNo = ~0U;
+ uint32 superH = ~0U; // Horizontal Supersampling
+ uint32 superV = ~0U; // Vertical Supersampling
};
class SOFInfo {
public:
- JpegComponentInfo compInfo[4];
+ std::array<JpegComponentInfo, 4> compInfo;
uint32 w = 0; // Width
uint32 h = 0; // Height
uint32 cps = 0; // Components
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/AbstractParallelizedDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/AbstractParallelizedDecompressor.h
index 36fbdffd8..d9dbd4d9e 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/AbstractParallelizedDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/AbstractParallelizedDecompressor.h
@@ -73,7 +73,7 @@ public:
return nullptr;
}
- uint32 taskNo = -1;
+ uint32 taskNo = ~0U;
const uint32 tasksTotal;
uint32 start = 0;
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/BinaryHuffmanTree.h b/src/external/rawspeed/src/librawspeed/decompressors/BinaryHuffmanTree.h
new file mode 100644
index 000000000..3660e1247
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/BinaryHuffmanTree.h
@@ -0,0 +1,237 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#pragma once
+
+#include <cassert> // for assert
+#include <initializer_list> // IWYU pragma: keep
+#include <memory> // for unique_ptr
+#include <vector> // for vector
+
+namespace rawspeed {
+
+template <typename T>
+class BinaryHuffmanTree final /* : public BinarySearchTree */ {
+public:
+ struct Branch;
+ struct Leaf;
+
+ struct Node {
+ enum class Type { Branch, Leaf };
+
+ explicit virtual operator Type() const = 0;
+
+ Branch& getAsBranch() {
+ assert(Node::Type::Branch == static_cast<Node::Type>(*this));
+ return static_cast<Branch&>(*this);
+ }
+
+ Leaf& getAsLeaf() {
+ assert(Node::Type::Leaf == static_cast<Node::Type>(*this));
+ return static_cast<Leaf&>(*this);
+ }
+
+ virtual ~Node() = default;
+ };
+
+ struct Branch final : public Node {
+ explicit operator typename Node::Type() const override {
+ return Node::Type::Branch;
+ }
+
+ std::unique_ptr<Node> zero;
+ std::unique_ptr<Node> one;
+
+ template <typename Lambda> bool forEachNode(Lambda l) const;
+ template <typename Lambda> bool forEachNode(Lambda l);
+
+ bool hasLeafs() const;
+
+ static bool pruneLeaflessBranches(std::unique_ptr<Node>* n);
+ };
+
+ struct Leaf final : public Node {
+ explicit operator typename Node::Type() const override {
+ return Node::Type::Leaf;
+ }
+
+ T value;
+
+ Leaf() = default;
+
+ explicit Leaf(T value_) : value(value_) {}
+ };
+
+ std::unique_ptr<Node> root;
+
+ std::vector<Branch*> getAllBranchesOfDepth(int depth);
+ std::vector<std::unique_ptr<Node>*> getAllVacantNodesAtDepth(int depth);
+ void pruneLeaflessBranches();
+};
+
+template <typename T>
+template <typename Lambda>
+bool BinaryHuffmanTree<T>::Branch::forEachNode(Lambda l) const {
+ bool done = false;
+ // NOTE: The order *IS* important! Left to right, zero to one!
+ for (const auto* node : {&zero, &one}) {
+ done = l(node);
+ if (done)
+ return done;
+ }
+ return done;
+}
+
+template <typename T>
+template <typename Lambda>
+bool BinaryHuffmanTree<T>::Branch::forEachNode(Lambda l) {
+ bool done = false;
+ // NOTE: The order *IS* important! Left to right, zero to one!
+ for (auto* node : {&zero, &one}) {
+ done = l(node);
+ if (done)
+ return done;
+ }
+ return done;
+}
+
+template <typename T> bool BinaryHuffmanTree<T>::Branch::hasLeafs() const {
+ return forEachNode([](const std::unique_ptr<Node>* n) {
+ assert(n);
+ if (!(*n)) // If the node is empty, then it certainly does not have leafs
+ return false;
+ return Node::Type::Leaf == static_cast<typename Node::Type>(**n);
+ });
+}
+
+template <typename T>
+bool BinaryHuffmanTree<T>::Branch::pruneLeaflessBranches(
+ std::unique_ptr<Node>* top) {
+ if (!top)
+ return false;
+
+ bool foundLeafs = false; // Any leafs in this branch?
+ (*top)->getAsBranch().forEachNode([&foundLeafs](std::unique_ptr<Node>* n) {
+ assert(n);
+ if (!(*n))
+ return false; // Nothing to do here, node is empty already, keep going.
+ switch (static_cast<typename Node::Type>(**n)) {
+ case Node::Type::Branch:
+ // Recurse. Any leafs in this branch?
+ if (Branch::pruneLeaflessBranches(n))
+ foundLeafs = true;
+ else
+ n->reset(); // Aha, dead branch, prune it!
+ break;
+ case Node::Type::Leaf:
+ foundLeafs = true; // Ok, this is a Leaf, great.
+ break;
+ }
+ return false; // keep going.
+ });
+
+ if (!foundLeafs)
+ top->reset();
+
+ return foundLeafs;
+}
+
+template <typename T>
+std::vector<typename BinaryHuffmanTree<T>::Branch*>
+BinaryHuffmanTree<T>::getAllBranchesOfDepth(int depth) {
+ assert(depth >= 0);
+
+ if (0 == depth) {
+ // The root (depth == 0) is special, and is *always* a Branch.
+ if (!root)
+ root = std::make_unique<Branch>();
+ return {&root->getAsBranch()};
+ }
+
+ // Recursively get all branches of previous depth
+ auto prevBranches = getAllBranchesOfDepth(depth - 1);
+
+ // Early return in case of no branches on previous depth
+ if (prevBranches.empty())
+ return {};
+
+ // We will have at most twice as many branches as at the previous depth.
+ decltype(prevBranches) branches;
+ branches.reserve(2U * prevBranches.size());
+
+ for (const auto& prevBranch : prevBranches) {
+ assert(prevBranch);
+
+ prevBranch->forEachNode([&branches](std::unique_ptr<Node>* n) {
+ assert(n);
+ // If the Node is vacant, make it a branch.
+ // The user was supposed to create all the required Leafs before.
+ // We shall prune Leaf-less branches at the end
+ if (!(*n))
+ *n = std::make_unique<Branch>();
+ // If this is a branch, add it to the list.
+ if (Node::Type::Branch == static_cast<typename Node::Type>(**n))
+ branches.emplace_back(&((*n)->getAsBranch()));
+ return false; // keep going;
+ });
+ }
+ assert(branches.size() <= 2U * prevBranches.size());
+
+ return branches;
+}
+
+template <typename T>
+std::vector<std::unique_ptr<typename BinaryHuffmanTree<T>::Node>*>
+BinaryHuffmanTree<T>::getAllVacantNodesAtDepth(int depth) {
+ assert(depth > 0);
+
+ // Get all branches of previous depth
+ auto prevBranches = getAllBranchesOfDepth(depth - 1);
+
+ // Early return in case of no branches on previous depth
+ if (prevBranches.empty())
+ return {};
+
+ // We will have at most two nodes for each branch on the previous depth.
+ std::vector<std::unique_ptr<BinaryHuffmanTree<T>::Node>*> nodes;
+ nodes.reserve(2U * prevBranches.size());
+
+ for (const auto& prevBranch : prevBranches) {
+ assert(prevBranch);
+
+ auto& b = prevBranch->getAsBranch();
+
+ b.forEachNode([&nodes](std::unique_ptr<Node>* n) {
+ assert(n);
+ if (!(*n)) // If there is no node already, then record it.
+ nodes.emplace_back(n);
+ return false; // keep going;
+ });
+ }
+ assert(nodes.size() <= 2U * prevBranches.size());
+
+ return nodes;
+}
+
+template <typename T> void BinaryHuffmanTree<T>::pruneLeaflessBranches() {
+ Branch::pruneLeaflessBranches(&root);
+}
+
+} // namespace rawspeed
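
BinaryHuffmanTree deliberately stops at the plumbing: getAllVacantNodesAtDepth() hands out the unfilled slots at a given depth (creating intermediate Branches on the way down) and pruneLeaflessBranches() cleans up afterwards. A hedged sketch of how a table built on top of it might attach one Leaf per code, length by length; populate() is hypothetical and is not the upstream HuffmanTableTree code:

  #include <cassert>
  #include <memory>
  #include <vector>

  #include "decompressors/BinaryHuffmanTree.h"

  // Hypothetical helper: turn a DHT-style (codes-per-length, code-values)
  // pair into a tree. Canonical codes of length L occupy the leftmost vacant
  // slots at depth L, so filling the depths in increasing order is enough.
  template <typename T>
  void populate(rawspeed::BinaryHuffmanTree<T>* tree,
                const std::vector<unsigned>& nCodesPerLength,
                const std::vector<T>& codeValues) {
    using Tree = rawspeed::BinaryHuffmanTree<T>;
    size_t next = 0;
    for (size_t len = 1; len < nCodesPerLength.size(); ++len) {
      auto vacant = tree->getAllVacantNodesAtDepth(static_cast<int>(len));
      // Enough room for this length is guaranteed by the checks in
      // AbstractHuffmanTable::setNCodesPerLength().
      assert(vacant.size() >= nCodesPerLength[len]);
      for (unsigned i = 0; i < nCodesPerLength[len]; ++i)
        *vacant[i] = std::make_unique<typename Tree::Leaf>(codeValues[next++]);
    }
    // Branches that never received a Leaf below them serve no purpose.
    tree->pruneLeaflessBranches();
  }
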
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/CMakeLists.txt b/src/external/rawspeed/src/librawspeed/decompressors/CMakeLists.txt
index 48b12413a..fa48ebaf8 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/CMakeLists.txt
+++ b/src/external/rawspeed/src/librawspeed/decompressors/CMakeLists.txt
@@ -2,11 +2,13 @@ FILE(GLOB SOURCES
"AbstractDecompressor.h"
"AbstractDngDecompressor.cpp"
"AbstractDngDecompressor.h"
+ "AbstractHuffmanTable.h"
"AbstractLJpegDecompressor.cpp"
"AbstractLJpegDecompressor.h"
"AbstractParallelizedDecompressor.cpp"
"AbstractParallelizedDecompressor.h"
"AbstractSamsungDecompressor.h"
+ "BinaryHuffmanTree.h"
"Cr2Decompressor.cpp"
"Cr2Decompressor.h"
"CrwDecompressor.cpp"
@@ -18,6 +20,10 @@ FILE(GLOB SOURCES
"HasselbladDecompressor.cpp"
"HasselbladDecompressor.h"
"HuffmanTable.h"
+ "HuffmanTableLUT.h"
+ "HuffmanTableLookup.h"
+ "HuffmanTableTree.h"
+ "HuffmanTableVector.h"
"JpegDecompressor.cpp"
"JpegDecompressor.h"
"KodakDecompressor.cpp"
@@ -30,8 +36,12 @@ FILE(GLOB SOURCES
"OlympusDecompressor.h"
"PanasonicDecompressor.cpp"
"PanasonicDecompressor.h"
+ "PanasonicDecompressorV5.cpp"
+ "PanasonicDecompressorV5.h"
"PentaxDecompressor.cpp"
"PentaxDecompressor.h"
+ "PhaseOneDecompressor.cpp"
+ "PhaseOneDecompressor.h"
"SamsungV0Decompressor.cpp"
"SamsungV0Decompressor.h"
"SamsungV1Decompressor.cpp"
@@ -44,6 +54,8 @@ FILE(GLOB SOURCES
"SonyArw2Decompressor.h"
"UncompressedDecompressor.cpp"
"UncompressedDecompressor.h"
+ "VC5Decompressor.cpp"
+ "VC5Decompressor.h"
)
target_sources(rawspeed PRIVATE
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/Cr2Decompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/Cr2Decompressor.cpp
index e43a0ed54..bce287cfb 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/Cr2Decompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/Cr2Decompressor.cpp
@@ -21,19 +21,21 @@
*/
#include "decompressors/Cr2Decompressor.h"
-#include "common/Common.h" // for uint32, unroll_loop, ushort16
-#include "common/Point.h" // for iPoint2D
+#include "common/Common.h" // for unroll_loop, uint32, ushort16
+#include "common/Point.h" // for iPoint2D, iPoint2D::area_type
#include "common/RawImage.h" // for RawImage, RawImageData
#include "decoders/RawDecoderException.h" // for ThrowRDE
-#include "io/BitPumpJPEG.h" // for BitPumpJPEG
-#include <algorithm> // for move, copy_n
+#include "io/BitPumpJPEG.h" // for BitPumpJPEG, BitStream<>::...
+#include <algorithm> // for copy_n
#include <cassert> // for assert
-#include <numeric> // for accumulate
+#include <initializer_list> // for initializer_list
using std::copy_n;
namespace rawspeed {
+class ByteStream;
+
Cr2Decompressor::Cr2Decompressor(const ByteStream& bs, const RawImage& img)
: AbstractLJpegDecompressor(bs, img) {
if (mRaw->getDataType() != TYPE_USHORT16)
@@ -206,6 +208,9 @@ void Cr2Decompressor::decodeN_X_Y()
if (X_S_F == 1) {
if (destX + sliceWidth > static_cast<unsigned>(mRaw->dim.x))
ThrowRDE("Bad slice width / frame size / image size combination.");
+ if (((sliceId + 1) == slicing.numSlices) &&
+ ((destX + sliceWidth) < static_cast<unsigned>(mRaw->dim.x)))
+ ThrowRDE("Unsufficient slices - do not fill the entire image");
} else {
// FIXME.
}
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/Cr2Decompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/Cr2Decompressor.h
index 4607f3ed8..9165212c6 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/Cr2Decompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/Cr2Decompressor.h
@@ -21,18 +21,15 @@
#pragma once
-#include "common/Common.h" // ushort16
+#include "common/Common.h" // for ushort16
#include "decoders/RawDecoderException.h" // for ThrowRDE
#include "decompressors/AbstractLJpegDecompressor.h" // for AbstractLJpegDe...
-#include "io/Buffer.h" // for Buffer, Buffer:...
-#include "io/ByteStream.h" // for ByteStream
#include <cassert> // for assert
namespace rawspeed {
+class ByteStream;
class RawImage;
-class Cr2Decompressor;
-// Decompresses Lossless JPEGs, with 2-4 components and optional X/Y subsampling
class Cr2Slicing {
int numSlices = 0;
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/CrwDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/CrwDecompressor.cpp
index ce9ccd874..6457568cb 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/CrwDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/CrwDecompressor.cpp
@@ -21,17 +21,16 @@
*/
#include "decompressors/CrwDecompressor.h"
-#include "common/Common.h" // for uint32, ushort16, uchar8
+#include "common/Common.h" // for uint32, uchar8, ushort16
#include "common/Point.h" // for iPoint2D
#include "common/RawImage.h" // for RawImage, RawImageData
-#include "decoders/RawDecoderException.h" // for RawDecoderException (ptr o...
+#include "decoders/RawDecoderException.h" // for ThrowRDE
#include "decompressors/HuffmanTable.h" // for HuffmanTable
#include "io/BitPumpJPEG.h" // for BitPumpJPEG, BitStream<>::...
#include "io/Buffer.h" // for Buffer
#include "io/ByteStream.h" // for ByteStream
-#include <algorithm> // for min
-#include <array> // for array
-#include <memory> // for make_unique
+#include <array> // for array, array<>::value_type
+#include <cassert> // for assert
using std::array;
@@ -85,32 +84,32 @@ CrwDecompressor::crw_hts CrwDecompressor::initHuffTables(uint32 table) {
ThrowRDE("Wrong table number: %u", table);
// NCodesPerLength
- static const uchar8 first_tree_ncpl[3][16] = {
+ static const std::array<std::array<uchar8, 16>, 3> first_tree_ncpl = {{
{0, 1, 4, 2, 3, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 2, 2, 3, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 6, 3, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- };
+ }};
- static const uchar8 first_tree_len[3][13] = {
+ static const std::array<std::array<uchar8, 13>, 3> first_tree_len = {{
{0x4, 0x3, 0x5, 0x6, 0x2, 0x7, 0x1, 0x8, 0x9, 0x0, 0xa, 0xb, 0xf},
{0x3, 0x2, 0x4, 0x1, 0x5, 0x0, 0x6, 0x7, 0x9, 0x8, 0xa, 0xb, 0xf},
{0x6, 0x5, 0x7, 0x4, 0x8, 0x3, 0x9, 0x2, 0x0, 0xa, 0x1, 0xb, 0xf},
- };
+ }};
- static const uchar8 first_tree_index[3][13] = {
+ static const std::array<std::array<uchar8, 13>, 3> first_tree_index = {{
{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf},
{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf},
{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf},
- };
+ }};
// NCodesPerLength
- static const uchar8 second_tree_ncpl[3][16] = {
+ static const std::array<std::array<uchar8, 16>, 3> second_tree_ncpl = {{
{0, 2, 2, 2, 1, 4, 2, 1, 2, 5, 1, 1, 0, 0, 0, 139},
{0, 2, 2, 1, 4, 1, 4, 1, 3, 3, 1, 0, 0, 0, 0, 140},
{0, 0, 6, 2, 1, 3, 3, 2, 5, 1, 2, 2, 8, 10, 0, 117},
- };
+ }};
- static const uchar8 second_tree_len[3][164] = {
+ static const std::array<std::array<uchar8, 164>, 3> second_tree_len = {{
{0x3, 0x4, 0x2, 0x5, 0x1, 0x6, 0x7, 0x8, 0x2, 0x3, 0x1, 0x4, 0x9, 0x5,
0x2, 0x0, 0x1, 0x6, 0xa, 0x0, 0x3, 0x7, 0x4, 0x1, 0x2, 0x8, 0x9, 0x3,
0x5, 0x1, 0x4, 0x2, 0x5, 0x1, 0x6, 0x7, 0x8, 0x9, 0x9, 0x6, 0xa, 0x9,
@@ -147,9 +146,9 @@ CrwDecompressor::crw_hts CrwDecompressor::initHuffTables(uint32 table) {
0x5, 0x2, 0x3, 0x5, 0x2, 0x4, 0x4, 0xa, 0x4, 0x5, 0x3, 0x2, 0x1, 0x5,
0x3, 0xa, 0x4, 0xa, 0x2, 0x1, 0x4, 0x1, 0x3, 0x3, 0xa, 0x3, 0x2, 0x2,
0x1, 0x3, 0x2, 0x1, 0x1, 0x3, 0x2, 0x1, 0xf, 0xf},
- };
+ }};
- static const uchar8 second_tree_index[3][164] = {
+ static const std::array<std::array<uchar8, 164>, 3> second_tree_index = {{
{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x1, 0x0, 0x1,
0x2, 0x0, 0x2, 0x1, 0x0, 0xf, 0x2, 0x1, 0x2, 0x3, 0x3, 0x1, 0x1, 0x3,
0x2, 0x4, 0x3, 0x4, 0x3, 0x5, 0x3, 0x3, 0x3, 0x2, 0x7, 0x2, 0x1, 0x3,
@@ -186,13 +185,17 @@ CrwDecompressor::crw_hts CrwDecompressor::initHuffTables(uint32 table) {
0x8, 0x6, 0xf, 0xc, 0xb, 0xa, 0x8, 0xb, 0x6, 0xa, 0xb, 0xd, 0x8, 0xe,
0xd, 0xa, 0xc, 0xc, 0xf, 0xb, 0xe, 0xd, 0x8, 0x6, 0xe, 0xc, 0xe, 0x8,
0xf, 0xa, 0xc, 0xa, 0xc, 0xe, 0xa, 0xe, 0xf, 0xf},
- };
+ }};
array<array<HuffmanTable, 2>, 2> mHuff = {{
- {{makeDecoder(first_tree_ncpl[table], first_tree_len[table]),
- makeDecoder(first_tree_ncpl[table], first_tree_index[table])}},
- {{makeDecoder(second_tree_ncpl[table], second_tree_len[table]),
- makeDecoder(second_tree_ncpl[table], second_tree_index[table])}},
+ {{makeDecoder(first_tree_ncpl[table].data(),
+ first_tree_len[table].data()),
+ makeDecoder(first_tree_ncpl[table].data(),
+ first_tree_index[table].data())}},
+ {{makeDecoder(second_tree_ncpl[table].data(),
+ second_tree_len[table].data()),
+ makeDecoder(second_tree_ncpl[table].data(),
+ second_tree_index[table].data())}},
}};
return mHuff;
@@ -224,6 +227,7 @@ inline void CrwDecompressor::decodeBlock(std::array<int, 64>* diffBuf,
continue;
int diff = lPump->getBits(len);
+ iPump->fill(len);
iPump->skipBits(len);
if (i >= 64)
@@ -255,7 +259,7 @@ void CrwDecompressor::decompress() {
BitPumpJPEG iPump(rawInput);
int carry = 0;
- int base[2];
+ std::array<int, 2> base;
uint32 j = 0;
ushort16* dest = nullptr;
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/CrwDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/CrwDecompressor.h
index f67aca04e..2f10ff9e7 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/CrwDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/CrwDecompressor.h
@@ -22,15 +22,13 @@
#pragma once
-#include "common/Common.h" // for uint32, uchar8
+#include "common/Common.h" // for uchar8, uint32
#include "common/RawImage.h" // for RawImage
#include "decompressors/AbstractDecompressor.h" // for AbstractDecompressor
#include "decompressors/HuffmanTable.h" // for HuffmanTable
#include "io/BitPumpJPEG.h" // for BitPumpJPEG
-#include "io/Buffer.h" // for Buffer
#include "io/ByteStream.h" // for ByteStream
#include <array> // for array
-#include <memory> // for unique_ptr
namespace rawspeed {
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/DeflateDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/DeflateDecompressor.cpp
index 9553bd9f8..a996c856b 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/DeflateDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/DeflateDecompressor.cpp
@@ -24,7 +24,6 @@
#include "decompressors/DeflateDecompressor.h"
#include "common/Common.h" // for uint32, ushort16
-#include "common/Point.h" // for iPoint2D
#include "decoders/RawDecoderException.h" // for ThrowRDE
#include "io/Endianness.h" // for getHostEndianness, Endiann...
#include <cassert> // for assert
@@ -40,7 +39,10 @@ static inline void decodeFPDeltaRow(unsigned char* src, unsigned char* dst,
unsigned int bytesps, int factor) {
// DecodeDeltaBytes
for (size_t col = factor; col < realTileWidth * bytesps; ++col) {
- src[col] += src[col - factor];
+ // Yes, this is correct, and is symmetrical with EncodeDeltaBytes in
+ // hdrmerge; the two combined are lossless.
+ // This indeed works in modulo-2^n arithmetic.
+ src[col] = static_cast<unsigned char>(src[col] + src[col - factor]);
}
// Reorder bytes into the image
// 16 and 32-bit versions depend on local architecture, 24-bit does not
@@ -184,13 +186,15 @@ static inline void expandFP24(unsigned char* dst, int width) {
}
}
-void DeflateDecompressor::decode(std::unique_ptr<unsigned char[]>* uBuffer,
- int tileWidthMax, int tileHeightMax, int width,
- int height, uint32 offX, uint32 offY) {
+void DeflateDecompressor::decode(
+ std::unique_ptr<unsigned char[]>* uBuffer, // NOLINT
+ int tileWidthMax, int tileHeightMax, int width, int height, uint32 offX,
+ uint32 offY) {
uLongf dstLen = sizeof(float) * tileWidthMax * tileHeightMax;
if (!uBuffer->get())
- *uBuffer = std::unique_ptr<unsigned char[]>(new unsigned char[dstLen]);
+ *uBuffer =
+ std::unique_ptr<unsigned char[]>(new unsigned char[dstLen]); // NOLINT
const auto cSize = input.getRemainSize();
const unsigned char* cBuffer = input.getData(cSize);
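
The comment in decodeFPDeltaRow() about modulo-2^n arithmetic can be spelled out: the encoder stores byte-wise differences, the decoder re-accumulates them, and because both sides use wrap-around unsigned 8-bit arithmetic the round trip is exact even when individual sums overflow. A tiny self-contained illustration (not the hdrmerge/rawspeed code itself):

  #include <cassert>
  #include <cstddef>
  #include <vector>

  // Delta-encode in place: each byte becomes the difference to the byte
  // `factor` positions earlier, computed modulo 256. Iterating backwards
  // keeps the reference bytes untouched until they have been used.
  static void encodeDelta(std::vector<unsigned char>& buf, size_t factor) {
    for (size_t col = buf.size(); col-- > factor;)
      buf[col] = static_cast<unsigned char>(buf[col] - buf[col - factor]);
  }

  // Decode by re-accumulating, mirroring the loop in decodeFPDeltaRow().
  static void decodeDelta(std::vector<unsigned char>& buf, size_t factor) {
    for (size_t col = factor; col < buf.size(); ++col)
      buf[col] = static_cast<unsigned char>(buf[col] + buf[col - factor]);
  }

  int main() {
    std::vector<unsigned char> row = {200, 250, 10, 7, 255, 0};
    const std::vector<unsigned char> orig = row;
    encodeDelta(row, 1); // differences wrap modulo 256 ...
    decodeDelta(row, 1); // ... and the sums wrap back: lossless round trip
    assert(row == orig);
    return 0;
  }
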
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/DeflateDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/DeflateDecompressor.h
index 514110c3b..b2cc1a3d7 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/DeflateDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/DeflateDecompressor.h
@@ -20,14 +20,13 @@
#pragma once
-#include "rawspeedconfig.h"
+#include "rawspeedconfig.h" // for HAVE_ZLIB
#ifdef HAVE_ZLIB
-#include "common/Common.h" // for getHostEndianness, uint32, Endianness::big
-#include "common/RawImage.h" // for RawImage
+#include "common/Common.h" // for uint32
+#include "common/RawImage.h" // for RawImage
#include "decompressors/AbstractDecompressor.h" // for AbstractDecompressor
-#include "io/Buffer.h" // for Buffer, Buffer::size_type
#include "io/ByteStream.h" // for ByteStream
#include <memory> // for unique_ptr
#include <utility> // for move
@@ -45,9 +44,9 @@ public:
int bps_)
: input(std::move(bs)), mRaw(img), predictor(predictor_), bps(bps_) {}
- void decode(std::unique_ptr<unsigned char[]>* uBuffer, int tileWidthMax,
- int tileHeightMax, int width, int height, uint32 offX,
- uint32 offY);
+ void decode(std::unique_ptr<unsigned char[]>* uBuffer, // NOLINT
+ int tileWidthMax, int tileHeightMax, int width, int height,
+ uint32 offX, uint32 offY);
};
} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/FujiDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/FujiDecompressor.cpp
index 44459419a..1b1162047 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/FujiDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/FujiDecompressor.cpp
@@ -22,16 +22,17 @@
*/
#include "decompressors/FujiDecompressor.h"
-#include "common/Common.h" // for roundUpDiv...
+#include "common/Common.h" // for ushort16
+#include "common/Point.h" // for iPoint2D
#include "common/RawImage.h" // for RawImage
#include "decoders/RawDecoderException.h" // for ThrowRDE
#include "decompressors/AbstractParallelizedDecompressor.h" // for RawDecom...
#include "io/Endianness.h" // for Endianness
-#include "metadata/ColorFilterArray.h" // for CFAColor...
+#include "metadata/ColorFilterArray.h" // for CFA_BLUE
#include <algorithm> // for fill, min
-#include <cstdlib> // for abs
+#include <cmath> // for abs
+#include <cstdlib> // for abs, size_t
#include <cstring> // for memcpy
-// IWYU pragma: no_include <bits/std_abs.h>
namespace rawspeed {
@@ -172,9 +173,9 @@ template <typename T>
void FujiDecompressor::copy_line(fuji_compressed_block* info,
const FujiStrip& strip, int cur_line,
T&& idx) const {
- ushort16* lineBufB[3];
- ushort16* lineBufG[6];
- ushort16* lineBufR[3];
+ std::array<ushort16*, 3> lineBufB;
+ std::array<ushort16*, 6> lineBufG;
+ std::array<ushort16*, 3> lineBufR;
for (int i = 0; i < 3; i++) {
lineBufR[i] = info->linebuf[_R2 + i] + 1;
@@ -233,7 +234,8 @@ void FujiDecompressor::copy_line_to_bayer(fuji_compressed_block* info,
copy_line(info, strip, cur_line, index);
}
-void FujiDecompressor::fuji_zerobits(BitPumpMSB* pump, int* count) const {
+inline void FujiDecompressor::fuji_zerobits(BitPumpMSB* pump,
+ int* count) const {
uchar8 zero = 0;
*count = 0;
@@ -265,12 +267,10 @@ FujiDecompressor::bitDiff(int value1, int value2) const {
}
template <typename T1, typename T2>
-int FujiDecompressor::fuji_decode_sample(T1&& func_0, T2&& func_1,
- fuji_compressed_block* info,
- BitPumpMSB* pump, ushort16* line_buf,
- int* pos, int_pair* grads) const {
+void FujiDecompressor::fuji_decode_sample(
+ T1&& func_0, T2&& func_1, fuji_compressed_block* info, BitPumpMSB* pump,
+ ushort16* line_buf, int* pos, std::array<int_pair, 41>* grads) const {
int interp_val = 0;
- int errcnt = 0;
int sample = 0;
int code = 0;
@@ -284,7 +284,7 @@ int FujiDecompressor::fuji_decode_sample(T1&& func_0, T2&& func_1,
fuji_zerobits(pump, &sample);
if (sample < common_info.max_bits - common_info.raw_bits - 1) {
- int decBits = bitDiff(grads[gradient].value1, grads[gradient].value2);
+ int decBits = bitDiff((*grads)[gradient].value1, (*grads)[gradient].value2);
code = pump->getBits(decBits);
code += sample << decBits;
} else {
@@ -293,7 +293,7 @@ int FujiDecompressor::fuji_decode_sample(T1&& func_0, T2&& func_1,
}
if (code < 0 || code >= common_info.total_values) {
- errcnt++;
+ ThrowRDE("fuji_decode_sample");
}
if (code & 1) {
@@ -302,14 +302,14 @@ int FujiDecompressor::fuji_decode_sample(T1&& func_0, T2&& func_1,
code /= 2;
}
- grads[gradient].value1 += std::abs(code);
+ (*grads)[gradient].value1 += std::abs(code);
- if (grads[gradient].value2 == common_info.min_value) {
- grads[gradient].value1 >>= 1;
- grads[gradient].value2 >>= 1;
+ if ((*grads)[gradient].value2 == common_info.min_value) {
+ (*grads)[gradient].value1 >>= 1;
+ (*grads)[gradient].value2 >>= 1;
}
- grads[gradient].value2++;
+ (*grads)[gradient].value2++;
interp_val = func_1(grad, interp_val, code);
@@ -326,19 +326,16 @@ int FujiDecompressor::fuji_decode_sample(T1&& func_0, T2&& func_1,
}
*pos += 2;
-
- return errcnt;
}
#define fuji_quant_gradient(v1, v2) \
(9 * ci.q_table[ci.q_point[4] + (v1)] + ci.q_table[ci.q_point[4] + (v2)])
-int FujiDecompressor::fuji_decode_sample_even(fuji_compressed_block* info,
- BitPumpMSB* pump,
- ushort16* line_buf, int* pos,
- int_pair* grads) const {
+void FujiDecompressor::fuji_decode_sample_even(
+ fuji_compressed_block* info, BitPumpMSB* pump, ushort16* line_buf, int* pos,
+ std::array<int_pair, 41>* grads) const {
const auto& ci = common_info;
- return fuji_decode_sample(
+ fuji_decode_sample(
[&ci](const ushort16* line_buf_cur, int* interp_val, int* grad,
int* gradient) {
int Rb = line_buf_cur[-2 - ci.line_width];
@@ -376,12 +373,11 @@ int FujiDecompressor::fuji_decode_sample_even(fuji_compressed_block* info,
info, pump, line_buf, pos, grads);
}
-int FujiDecompressor::fuji_decode_sample_odd(fuji_compressed_block* info,
- BitPumpMSB* pump,
- ushort16* line_buf, int* pos,
- int_pair* grads) const {
+void FujiDecompressor::fuji_decode_sample_odd(
+ fuji_compressed_block* info, BitPumpMSB* pump, ushort16* line_buf, int* pos,
+ std::array<int_pair, 41>* grads) const {
const auto& ci = common_info;
- return fuji_decode_sample(
+ fuji_decode_sample(
[&ci](const ushort16* line_buf_cur, int* interp_val, int* grad,
int* gradient) {
int Ra = line_buf_cur[-1];
@@ -436,26 +432,26 @@ void FujiDecompressor::fuji_decode_interpolation_even(int line_width,
*pos += 2;
}
-void FujiDecompressor::fuji_extend_generic(ushort16* linebuf[_ltotal],
- int line_width, int start,
- int end) const {
+void FujiDecompressor::fuji_extend_generic(
+ std::array<ushort16*, _ltotal> linebuf, int line_width, int start,
+ int end) const {
for (int i = start; i <= end; i++) {
linebuf[i][0] = linebuf[i - 1][1];
linebuf[i][line_width + 1] = linebuf[i - 1][line_width];
}
}
-void FujiDecompressor::fuji_extend_red(ushort16* linebuf[_ltotal],
+void FujiDecompressor::fuji_extend_red(std::array<ushort16*, _ltotal> linebuf,
int line_width) const {
fuji_extend_generic(linebuf, line_width, _R2, _R4);
}
-void FujiDecompressor::fuji_extend_green(ushort16* linebuf[_ltotal],
+void FujiDecompressor::fuji_extend_green(std::array<ushort16*, _ltotal> linebuf,
int line_width) const {
fuji_extend_generic(linebuf, line_width, _G2, _G7);
}
-void FujiDecompressor::fuji_extend_blue(ushort16* linebuf[_ltotal],
+void FujiDecompressor::fuji_extend_blue(std::array<ushort16*, _ltotal> linebuf,
int line_width) const {
fuji_extend_generic(linebuf, line_width, _B2, _B4);
}
@@ -477,8 +473,6 @@ void FujiDecompressor::xtrans_decode_block(fuji_compressed_block* info,
ColorPos g;
ColorPos b;
- int errcnt = 0;
-
const int line_width = common_info.line_width;
// FIXME: GCC5 sucks.
@@ -492,10 +486,10 @@ void FujiDecompressor::xtrans_decode_block(fuji_compressed_block* info,
even_func(c0, c1, grad, c0_pos, c1_pos);
if (g.even > 8) {
- errcnt += fuji_decode_sample_odd(info, pump, info->linebuf[c0] + 1,
- &c0_pos.odd, info->grad_odd[grad]);
- errcnt += fuji_decode_sample_odd(info, pump, info->linebuf[c1] + 1,
- &c1_pos.odd, info->grad_odd[grad]);
+ fuji_decode_sample_odd(info, pump, info->linebuf[c0] + 1, &c0_pos.odd,
+ &(info->grad_odd[grad]));
+ fuji_decode_sample_odd(info, pump, info->linebuf[c1] + 1, &c1_pos.odd,
+ &(info->grad_odd[grad]));
}
}
};
@@ -505,8 +499,8 @@ void FujiDecompressor::xtrans_decode_block(fuji_compressed_block* info,
ColorPos& c1_pos) {
fuji_decode_interpolation_even(line_width, info->linebuf[c0] + 1,
&c0_pos.even);
- errcnt += fuji_decode_sample_even(info, pump, info->linebuf[c1] + 1,
- &c1_pos.even, info->grad_even[grad]);
+ fuji_decode_sample_even(info, pump, info->linebuf[c1] + 1, &c1_pos.even,
+ &(info->grad_even[grad]));
},
_R2, _G2, 0, r, g);
@@ -518,8 +512,8 @@ void FujiDecompressor::xtrans_decode_block(fuji_compressed_block* info,
pass(
[&](_xt_lines c0, _xt_lines c1, int grad, ColorPos& c0_pos,
ColorPos& c1_pos) {
- errcnt += fuji_decode_sample_even(info, pump, info->linebuf[c0] + 1,
- &c0_pos.even, info->grad_even[grad]);
+ fuji_decode_sample_even(info, pump, info->linebuf[c0] + 1, &c0_pos.even,
+ &(info->grad_even[grad]));
fuji_decode_interpolation_even(line_width, info->linebuf[c1] + 1,
&c1_pos.even);
},
@@ -535,9 +529,8 @@ void FujiDecompressor::xtrans_decode_block(fuji_compressed_block* info,
[&](_xt_lines c0, _xt_lines c1, int grad, ColorPos& c0_pos,
ColorPos& c1_pos) {
if (c0_pos.even & 3) {
- errcnt +=
- fuji_decode_sample_even(info, pump, info->linebuf[c0] + 1,
- &c0_pos.even, info->grad_even[grad]);
+ fuji_decode_sample_even(info, pump, info->linebuf[c0] + 1,
+ &c0_pos.even, &(info->grad_even[grad]));
} else {
fuji_decode_interpolation_even(line_width, info->linebuf[c0] + 1,
&c0_pos.even);
@@ -557,16 +550,15 @@ void FujiDecompressor::xtrans_decode_block(fuji_compressed_block* info,
pass(
[&](_xt_lines c0, _xt_lines c1, int grad, ColorPos& c0_pos,
ColorPos& c1_pos) {
- errcnt += fuji_decode_sample_even(info, pump, info->linebuf[c0] + 1,
- &c0_pos.even, info->grad_even[grad]);
+ fuji_decode_sample_even(info, pump, info->linebuf[c0] + 1, &c0_pos.even,
+ &(info->grad_even[grad]));
if ((c1_pos.even & 3) == 2) {
fuji_decode_interpolation_even(line_width, info->linebuf[c1] + 1,
&c1_pos.even);
} else {
- errcnt +=
- fuji_decode_sample_even(info, pump, info->linebuf[c1] + 1,
- &c1_pos.even, info->grad_even[grad]);
+ fuji_decode_sample_even(info, pump, info->linebuf[c1] + 1,
+ &c1_pos.even, &(info->grad_even[grad]));
}
},
_G5, _B3, 0, g, b);
@@ -584,13 +576,12 @@ void FujiDecompressor::xtrans_decode_block(fuji_compressed_block* info,
fuji_decode_interpolation_even(line_width, info->linebuf[c0] + 1,
&c0_pos.even);
} else {
- errcnt +=
- fuji_decode_sample_even(info, pump, info->linebuf[c0] + 1,
- &c0_pos.even, info->grad_even[grad]);
+ fuji_decode_sample_even(info, pump, info->linebuf[c0] + 1,
+ &c0_pos.even, &(info->grad_even[grad]));
}
- errcnt += fuji_decode_sample_even(info, pump, info->linebuf[c1] + 1,
- &c1_pos.even, info->grad_even[grad]);
+ fuji_decode_sample_even(info, pump, info->linebuf[c1] + 1, &c1_pos.even,
+ &(info->grad_even[grad]));
},
_R4, _G6, 1, r, g);
@@ -607,9 +598,8 @@ void FujiDecompressor::xtrans_decode_block(fuji_compressed_block* info,
&c0_pos.even);
if (c1_pos.even & 3) {
- errcnt +=
- fuji_decode_sample_even(info, pump, info->linebuf[c1] + 1,
- &c1_pos.even, info->grad_even[grad]);
+ fuji_decode_sample_even(info, pump, info->linebuf[c1] + 1,
+ &c1_pos.even, &(info->grad_even[grad]));
} else {
fuji_decode_interpolation_even(line_width, info->linebuf[c1] + 1,
&c1_pos.even);
@@ -619,9 +609,6 @@ void FujiDecompressor::xtrans_decode_block(fuji_compressed_block* info,
fuji_extend_green(info->linebuf, line_width);
fuji_extend_blue(info->linebuf, line_width);
-
- if (errcnt)
- ThrowRDE("xtrans_decode_block");
}
void FujiDecompressor::fuji_bayer_decode_block(fuji_compressed_block* info,
@@ -641,25 +628,23 @@ void FujiDecompressor::fuji_bayer_decode_block(fuji_compressed_block* info,
ColorPos g;
ColorPos b;
- int errcnt = 0;
-
const int line_width = common_info.line_width;
auto pass = [&](_xt_lines c0, _xt_lines c1, int grad, ColorPos& c0_pos,
ColorPos& c1_pos) {
while (g.even < line_width || g.odd < line_width) {
if (g.even < line_width) {
- errcnt += fuji_decode_sample_even(info, pump, info->linebuf[c0] + 1,
- &c0_pos.even, info->grad_even[grad]);
- errcnt += fuji_decode_sample_even(info, pump, info->linebuf[c1] + 1,
- &c1_pos.even, info->grad_even[grad]);
+ fuji_decode_sample_even(info, pump, info->linebuf[c0] + 1, &c0_pos.even,
+ &(info->grad_even[grad]));
+ fuji_decode_sample_even(info, pump, info->linebuf[c1] + 1, &c1_pos.even,
+ &(info->grad_even[grad]));
}
if (g.even > 8) {
- errcnt += fuji_decode_sample_odd(info, pump, info->linebuf[c0] + 1,
- &c0_pos.odd, info->grad_odd[grad]);
- errcnt += fuji_decode_sample_odd(info, pump, info->linebuf[c1] + 1,
- &c1_pos.odd, info->grad_odd[grad]);
+ fuji_decode_sample_odd(info, pump, info->linebuf[c0] + 1, &c0_pos.odd,
+ &(info->grad_odd[grad]));
+ fuji_decode_sample_odd(info, pump, info->linebuf[c1] + 1, &c1_pos.odd,
+ &(info->grad_odd[grad]));
}
}
};
@@ -703,9 +688,6 @@ void FujiDecompressor::fuji_bayer_decode_block(fuji_compressed_block* info,
b.reset();
pass_GB(_G7, _B4, 2);
-
- if (errcnt)
- ThrowRDE("fuji decode bayer block");
}
void FujiDecompressor::fuji_decode_strip(
@@ -719,9 +701,9 @@ void FujiDecompressor::fuji_decode_strip(
int b;
};
- const i_pair mtable[6] = {{_R0, _R3}, {_R1, _R4}, {_G0, _G6},
- {_G1, _G7}, {_B0, _B3}, {_B1, _B4}};
- const i_pair ztable[3] = {{_R2, 3}, {_G2, 6}, {_B2, 3}};
+ const std::array<i_pair, 6> mtable = {
+ {{_R0, _R3}, {_R1, _R4}, {_G0, _G6}, {_G1, _G7}, {_B0, _B3}, {_B1, _B4}}};
+ const std::array<i_pair, 3> ztable = {{{_R2, 3}, {_G2, 6}, {_B2, 3}}};
for (int cur_line = 0; cur_line < strip.height(); cur_line++) {
if (header.raw_type == 16) {
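
The grads state that fuji_decode_sample() now takes as std::array<int_pair, 41>* is a small adaptive model per quantized gradient: value1 accumulates the magnitudes of decoded codes, value2 counts samples, bitDiff(value1, value2) chooses how many extra raw bits to read, and both counters are halved when value2 reaches min_value so older history decays. A sketch of just that update step, distilled from the hunks above (simplified names, not the upstream types):

  #include <cstdlib> // for std::abs

  struct Gradient {
    int value1; // running sum of |code| seen for this gradient
    int value2; // running number of samples
  };

  // Mirrors the update visible in fuji_decode_sample(); min_value stands in
  // for common_info.min_value.
  inline void updateGradient(Gradient* g, int code, int min_value) {
    g->value1 += std::abs(code);
    if (g->value2 == min_value) { // periodic rescale: halve both counters
      g->value1 >>= 1;
      g->value2 >>= 1;
    }
    g->value2++;
  }
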
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/FujiDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/FujiDecompressor.h
index 1955e9036..f4a0d9f27 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/FujiDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/FujiDecompressor.h
@@ -26,9 +26,9 @@
#include "io/BitPumpMSB.h" // for BitPumpMSB
#include "io/ByteStream.h" // for ByteStream
#include "metadata/ColorFilterArray.h" // for CFAColor
-#include <algorithm> // for move
#include <array> // for array
#include <cassert> // for assert
+#include <utility> // for move
#include <vector> // for vector
namespace rawspeed {
@@ -115,7 +115,7 @@ protected:
explicit fuji_compressed_params(const FujiDecompressor& d);
std::vector<char> q_table; /* quantization table */
- int q_point[5]; /* quantization points */
+ std::array<int, 5> q_point; /* quantization points */
int max_bits;
int min_value;
int raw_bits;
@@ -158,10 +158,12 @@ protected:
void reset(const fuji_compressed_params* params);
- int_pair grad_even[3][41]; // tables of gradients
- int_pair grad_odd[3][41];
+ // tables of gradients
+ std::array<std::array<int_pair, 41>, 3> grad_even;
+ std::array<std::array<int_pair, 41>, 3> grad_odd;
+
std::vector<ushort16> linealloc;
- ushort16* linebuf[_ltotal];
+ std::array<ushort16*, _ltotal> linebuf;
};
private:
@@ -183,27 +185,30 @@ private:
void copy_line_to_bayer(fuji_compressed_block* info, const FujiStrip& strip,
int cur_line) const;
- void fuji_zerobits(BitPumpMSB* pump, int* count) const;
+ inline void fuji_zerobits(BitPumpMSB* pump, int* count) const;
int bitDiff(int value1, int value2) const;
template <typename T1, typename T2>
- int fuji_decode_sample(T1&& func_0, T2&& func_1, fuji_compressed_block* info,
- BitPumpMSB* pump, ushort16* line_buf, int* pos,
- int_pair* grads) const;
- int fuji_decode_sample_even(fuji_compressed_block* info, BitPumpMSB* pump,
+ void fuji_decode_sample(T1&& func_0, T2&& func_1, fuji_compressed_block* info,
+ BitPumpMSB* pump, ushort16* line_buf, int* pos,
+ std::array<int_pair, 41>* grads) const;
+ void fuji_decode_sample_even(fuji_compressed_block* info, BitPumpMSB* pump,
+ ushort16* line_buf, int* pos,
+ std::array<int_pair, 41>* grads) const;
+ void fuji_decode_sample_odd(fuji_compressed_block* info, BitPumpMSB* pump,
ushort16* line_buf, int* pos,
- int_pair* grads) const;
- int fuji_decode_sample_odd(fuji_compressed_block* info, BitPumpMSB* pump,
- ushort16* line_buf, int* pos,
- int_pair* grads) const;
+ std::array<int_pair, 41>* grads) const;
void fuji_decode_interpolation_even(int line_width, ushort16* line_buf,
int* pos) const;
- void fuji_extend_generic(ushort16* linebuf[_ltotal], int line_width,
- int start, int end) const;
- void fuji_extend_red(ushort16* linebuf[_ltotal], int line_width) const;
- void fuji_extend_green(ushort16* linebuf[_ltotal], int line_width) const;
- void fuji_extend_blue(ushort16* linebuf[_ltotal], int line_width) const;
+ void fuji_extend_generic(std::array<ushort16*, _ltotal> linebuf,
+ int line_width, int start, int end) const;
+ void fuji_extend_red(std::array<ushort16*, _ltotal> linebuf,
+ int line_width) const;
+ void fuji_extend_green(std::array<ushort16*, _ltotal> linebuf,
+ int line_width) const;
+ void fuji_extend_blue(std::array<ushort16*, _ltotal> linebuf,
+ int line_width) const;
void xtrans_decode_block(fuji_compressed_block* info,
BitPumpMSB* pump, int cur_line) const;
void fuji_bayer_decode_block(fuji_compressed_block* info,
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/HasselbladDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/HasselbladDecompressor.cpp
index 11a74cf2e..ff0e345cf 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/HasselbladDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/HasselbladDecompressor.cpp
@@ -41,7 +41,7 @@ HasselbladDecompressor::HasselbladDecompressor(const ByteStream& bs,
// FIXME: could be wrong. max "active pixels" - "100 MP"
if (mRaw->dim.x == 0 || mRaw->dim.y == 0 || mRaw->dim.x % 2 != 0 ||
- mRaw->dim.x > 11600 || mRaw->dim.y > 8700) {
+ mRaw->dim.x > 12000 || mRaw->dim.y > 8816) {
ThrowRDE("Unexpected image dimensions found: (%u; %u)", mRaw->dim.x,
mRaw->dim.y);
}
@@ -82,8 +82,10 @@ void HasselbladDecompressor::decodeScan() {
int len2 = ht[0]->decodeLength(bitStream);
p1 += getBits(&bitStream, len1);
p2 += getBits(&bitStream, len2);
- dest[x] = p1;
- dest[x+1] = p2;
+ // NOTE: this is rather unusual and weird, but appears to be correct.
+ // clampBits(p, 16) results in completely garbled images.
+ dest[x] = ushort16(p1);
+ dest[x + 1] = ushort16(p2);
}
}
input.skipBytes(bitStream.getBufferPosition());
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/HasselbladDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/HasselbladDecompressor.h
index 03535d3cb..47ad90945 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/HasselbladDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/HasselbladDecompressor.h
@@ -23,11 +23,10 @@
#include "decompressors/AbstractLJpegDecompressor.h" // for AbstractLJpegDe...
#include "io/BitPumpMSB32.h" // for BitPumpMSB32
-#include "io/Buffer.h" // for Buffer, Buffer:...
-#include "io/ByteStream.h" // for ByteStream
namespace rawspeed {
+class ByteStream;
class RawImage;
class HasselbladDecompressor final : public AbstractLJpegDecompressor
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTable.h b/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTable.h
index 260df3814..6f54bcac1 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTable.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTable.h
@@ -1,8 +1,7 @@
/*
RawSpeed - RAW file decoder.
- Copyright (C) 2017 Axel Waggershauser
- Copyright (C) 2017 Roman Lebedev
+ Copyright (C) 2018 Roman Lebedev
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -21,346 +20,20 @@
#pragma once
-#include "common/Common.h" // for ushort16, uchar8, int32
-#include "decoders/RawDecoderException.h" // for ThrowRDE
-#include "io/Buffer.h" // for Buffer
-#include <algorithm> // for copy
-#include <cassert> // for assert
-#include <cstddef> // for size_t
-#include <iterator> // for distance
-#include <numeric> // for accumulate
-#include <vector> // for vector, allocator, operator==
+// IWYU pragma: begin_exports
-/*
-* The following code is inspired by the IJG JPEG library.
-*
-* Copyright (C) 1991, 1992, Thomas G. Lane.
-* Part of the Independent JPEG Group's software.
-* See the file Copyright for more details.
-*
-* Copyright (c) 1993 Brian C. Smith, The Regents of the University
-* of California
-* All rights reserved.
-*
-* Copyright (c) 1994 Kongji Huang and Brian C. Smith.
-* Cornell University
-* All rights reserved.
-*
-* Permission to use, copy, modify, and distribute this software and its
-* documentation for any purpose, without fee, and without written agreement is
-* hereby granted, provided that the above copyright notice and the following
-* two paragraphs appear in all copies of this software.
-*
-* IN NO EVENT SHALL CORNELL UNIVERSITY BE LIABLE TO ANY PARTY FOR
-* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
-* OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF CORNELL
-* UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*
-* CORNELL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
-* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
-* AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
-* ON AN "AS IS" BASIS, AND CORNELL UNIVERSITY HAS NO OBLIGATION TO
-* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-*/
-
-namespace rawspeed {
-
-class HuffmanTable final {
- // private fields calculated from codesPerBits and codeValues
- // they are index '1' based, so we can directly lookup the value
- // for code length l without decrementing
- std::vector<ushort16> maxCodeOL; // index is length of code
- std::vector<ushort16> codeOffsetOL; // index is length of code
-
- // The code can be compiled with two different decode lookup table layouts.
- // The idea is that different CPU architectures may perform better with
- // one or the other, depending on the relative performance of their arithmetic
- // core vs their memory access. For an Intel Core i7, the big table is better.
-#if 1
- // lookup table containing 3 fields: payload:16|flag:8|len:8
- // The payload may be the fully decoded diff or the length of the diff.
- // The len field contains the number of bits, this lookup consumed.
- // A lookup value of 0 means the code was too big to fit into the table.
- // The optimal LookupDepth is also likely to depend on the CPU architecture.
- static constexpr unsigned PayloadShift = 16;
- static constexpr unsigned FlagMask = 0x100;
- static constexpr unsigned LenMask = 0xff;
- static constexpr unsigned LookupDepth = 11;
- std::vector<int32> decodeLookup;
-#else
- // lookup table containing 2 fields: payload:4|len:4
- // the payload is the length of the diff, len is the length of the code
- static constexpr unsigned LookupDepth = 15;
- static constexpr unsigned PayloadShift = 4;
- static constexpr unsigned FlagMask = 0;
- static constexpr unsigned LenMask = 0x0f;
- std::vector<uchar8> decodeLookup;
-#endif
-
- bool fullDecode = true;
- bool fixDNGBug16 = false;
-
- inline size_t __attribute__((pure)) maxCodePlusDiffLength() const {
- return nCodesPerLength.size() - 1 +
- *(std::max_element(codeValues.cbegin(), codeValues.cend()));
- }
-
- // These two fields directly represent the contents of a JPEG DHT field
-
- // 1. The number of codes there are per bit length, this is index 1 based.
- // (there are always 0 codes of length 0)
- std::vector<unsigned int> nCodesPerLength; // index is length of code
- inline unsigned int __attribute__((pure)) maxCodesCount() const {
- return std::accumulate(nCodesPerLength.begin(), nCodesPerLength.end(), 0U);
- }
-
- // 2. This is the actual huffman encoded data, i.e. the 'alphabet'. Each value
- // is the number of bits following the code that encode the difference to the
- // last pixel. Valid values are in the range 0..16.
- // signExtended() is used to decode the difference bits to a signed int.
- std::vector<uchar8> codeValues; // index is just sequential number
-
-public:
- bool operator==(const HuffmanTable& other) const {
- return nCodesPerLength == other.nCodesPerLength
- && codeValues == other.codeValues;
- }
-
- uint32 setNCodesPerLength(const Buffer& data) {
- assert(data.getSize() == 16);
-
- nCodesPerLength.resize(17, 0);
- std::copy(data.begin(), data.end(), &nCodesPerLength[1]);
- assert(nCodesPerLength[0] == 0);
-
- // trim empty entries from the codes per length table on the right
- while (!nCodesPerLength.empty() && nCodesPerLength.back() == 0)
- nCodesPerLength.pop_back();
-
- if (nCodesPerLength.empty())
- ThrowRDE("Codes-per-length table is empty");
-
- assert(nCodesPerLength.back() > 0);
-
- const auto count = maxCodesCount();
- assert(count > 0);
-
- if (count > 162)
- ThrowRDE("Too big code-values table");
-
- for (auto codeLen = 1U; codeLen < nCodesPerLength.size(); codeLen++) {
- // we have codeLen bits. make sure that that code count can actually fit
- const auto nCodes = nCodesPerLength[codeLen];
- if (nCodes > ((1U << codeLen) - 1U)) {
- ThrowRDE("Corrupt Huffman. Can not have %u codes in %u-bit len", nCodes,
- codeLen);
- }
- }
-
- return count;
- }
-
- void setCodeValues(const Buffer& data) {
- // spec says max 16 but Hasselblad ignores that -> allow 17
- // Canon's old CRW really ignores this ...
- assert(data.getSize() <= 162);
- assert(data.getSize() == maxCodesCount());
-
- codeValues.clear();
- codeValues.reserve(maxCodesCount());
- std::copy(data.begin(), data.end(), std::back_inserter(codeValues));
- assert(codeValues.size() == maxCodesCount());
-
- for (const auto cValue : codeValues) {
- if (cValue > 16)
- ThrowRDE("Corrupt Huffman. Code value %u is bigger than 16", cValue);
- }
- }
-
- void setup(bool fullDecode_, bool fixDNGBug16_) {
- this->fullDecode = fullDecode_;
- this->fixDNGBug16 = fixDNGBug16_;
-
- // store the code lengths in bits, valid values are 0..16
- std::vector<uchar8> code_len; // index is just sequential number
- // store the codes themselves (bit patterns found inside the stream)
- std::vector<ushort16> codes; // index is just sequential number
-
- assert(!nCodesPerLength.empty());
- assert(maxCodesCount() > 0);
+#include "decompressors/HuffmanTableLUT.h" // for HuffmanTableLUT
+// #include "decompressors/HuffmanTableLookup.h" // for HuffmanTableLookup
+// #include "decompressors/HuffmanTableTree.h" // for HuffmanTableTree
+// #include "decompressors/HuffmanTableVector.h" // for HuffmanTableVector
- unsigned int maxCodeLength = nCodesPerLength.size() - 1U;
- assert(codeValues.size() == maxCodesCount());
+// IWYU pragma: end_exports
- assert(maxCodePlusDiffLength() <= 32U);
-
- // reserve all the memory. avoids lots of small allocs
- code_len.reserve(maxCodesCount());
- codes.reserve(maxCodesCount());
-
- // Figure C.1: make table of Huffman code length for each symbol
- // Figure C.2: generate the codes themselves
- uint32 code = 0;
- for (unsigned int l = 1; l <= maxCodeLength; ++l) {
- assert(nCodesPerLength[l] <= ((1U << l) - 1U));
-
- for (unsigned int i = 0; i < nCodesPerLength[l]; ++i) {
- if (code > 0xffff) {
- ThrowRDE("Corrupt Huffman: code value overflow on len = %u, %u-th "
- "code out of %u\n",
- l, i, nCodesPerLength[l]);
- }
-
- code_len.push_back(l);
- codes.push_back(code);
- code++;
- }
- code <<= 1;
- }
-
- assert(code_len.size() == maxCodesCount());
- assert(codes.size() == maxCodesCount());
-
- // Figure F.15: generate decoding tables
- codeOffsetOL.resize(maxCodeLength + 1UL, 0xffff);
- maxCodeOL.resize(maxCodeLength + 1UL);
- int code_index = 0;
- for (unsigned int l = 1U; l <= maxCodeLength; l++) {
- if (nCodesPerLength[l]) {
- codeOffsetOL[l] = codes[code_index] - code_index;
- code_index += nCodesPerLength[l];
- maxCodeOL[l] = codes[code_index - 1];
- }
- }
-
- // Generate lookup table for fast decoding lookup.
- // See definition of decodeLookup above
- decodeLookup.resize(1 << LookupDepth);
- for (size_t i = 0; i < codes.size(); i++) {
- uchar8 code_l = code_len[i];
- if (code_l > static_cast<int>(LookupDepth))
- break;
-
- ushort16 ll = codes[i] << (LookupDepth - code_l);
- ushort16 ul = ll | ((1 << (LookupDepth - code_l)) - 1);
- ushort16 diff_l = codeValues[i];
- for (ushort16 c = ll; c <= ul; c++) {
- if (!(c < decodeLookup.size()))
- ThrowRDE("Corrupt Huffman");
-
- if (!FlagMask || !fullDecode || diff_l + code_l > LookupDepth) {
- // lookup bit depth is too small to fit both the encoded length
- // and the final difference value.
- // -> store only the length and do a normal sign extension later
- decodeLookup[c] = diff_l << PayloadShift | code_l;
- } else {
- // diff_l + code_l <= lookupDepth
- // The table bit depth is large enough to store both.
- decodeLookup[c] = (code_l + diff_l) | FlagMask;
-
- if (diff_l) {
- uint32 diff = (c >> (LookupDepth - code_l - diff_l)) & ((1 << diff_l) - 1);
- decodeLookup[c] |= static_cast<uint32>(signExtended(diff, diff_l))
- << PayloadShift;
- }
- }
- }
- }
- }
-
- // WARNING: the caller should check that len != 0 before calling the function
- inline static int __attribute__((const))
- signExtended(uint32 diff, uint32 len) {
- int32 ret = diff;
-#if 0
-#define _X(x) (1<<x)-1
- constexpr static int offset[16] = {
- 0, _X(1), _X(2), _X(3), _X(4), _X(5), _X(6), _X(7),
- _X(8), _X(9), _X(10), _X(11), _X(12), _X(13), _X(14), _X(15)};
-#undef _X
- if ((diff & (1 << (len - 1))) == 0)
- ret -= offset[len];
-#else
- if ((diff & (1 << (len - 1))) == 0)
- ret -= (1 << len) - 1;
-#endif
- return ret;
- }
-
- template<typename BIT_STREAM> inline int decodeLength(BIT_STREAM& bs) const {
- assert(!fullDecode);
- return decode<BIT_STREAM, false>(bs);
- }
-
- template<typename BIT_STREAM> inline int decodeNext(BIT_STREAM& bs) const {
- assert(fullDecode);
- return decode<BIT_STREAM, true>(bs);
- }
-
- // The bool template paraeter is to enable two versions:
- // one returning only the length of the of diff bits (see Hasselblad),
- // one to return the fully decoded diff.
- // All ifs depending on this bool will be optimized out by the compiler
- template<typename BIT_STREAM, bool FULL_DECODE> inline int decode(BIT_STREAM& bs) const {
- assert(FULL_DECODE == fullDecode);
-
- // 32 is the absolute maximum combined length of code + diff
- // assertion maxCodePlusDiffLength() <= 32U is already checked in setup()
- bs.fill(32);
-
- // for processors supporting bmi2 instructions, using maxCodePlusDiffLength()
- // might be benifitial
-
- uint32 code = bs.peekBitsNoFill(LookupDepth);
- assert(code < decodeLookup.size());
- int val = decodeLookup[code];
- int len = val & LenMask;
- assert(len >= 0);
- assert(len <= 16);
-
- // if the code is invalid (bitstream corrupted) len will be 0
- bs.skipBitsNoFill(len);
- if (FULL_DECODE && val & FlagMask) {
- // if the flag bit is set, the payload is the already sign extended difference
- return val >> PayloadShift;
- }
-
- if (len) {
- // if the flag bit is not set but len != 0, the payload is the number of bits to sign extend and return
- const int l_diff = val >> PayloadShift;
- assert((FULL_DECODE && (len + l_diff <= 32)) || !FULL_DECODE);
- return FULL_DECODE ? signExtended(bs.getBitsNoFill(l_diff), l_diff) : l_diff;
- }
-
- uint32 code_l = LookupDepth;
- bs.skipBitsNoFill(code_l);
- while (code_l < maxCodeOL.size() && code > maxCodeOL[code_l]) {
- uint32 temp = bs.getBitsNoFill(1);
- code = (code << 1) | temp;
- code_l++;
- }
-
- if (code_l >= maxCodeOL.size() || code > maxCodeOL[code_l])
- ThrowRDE("bad Huffman code: %u (len: %u)", code, code_l);
-
- if (code < codeOffsetOL[code_l])
- ThrowRDE("likely corrupt Huffman code: %u (len: %u)", code, code_l);
-
- int diff_l = codeValues[code - codeOffsetOL[code_l]];
-
- if (!FULL_DECODE)
- return diff_l;
-
- if (diff_l == 16) {
- if (fixDNGBug16)
- bs.skipBits(16);
- return -32768;
- }
+namespace rawspeed {
- assert(FULL_DECODE);
- assert((diff_l && (len + code_l + diff_l <= 32)) || !diff_l);
- return diff_l ? signExtended(bs.getBitsNoFill(diff_l), diff_l) : 0;
- }
-};
+using HuffmanTable = HuffmanTableLUT;
+// using HuffmanTable = HuffmanTableLookup;
+// using HuffmanTable = HuffmanTableTree;
+// using HuffmanTable = HuffmanTableVector;
} // namespace rawspeed
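With this change HuffmanTable.h no longer carries an implementation of its own; it re-exports the concrete tables and picks one through a type alias, so call sites keep compiling unchanged whichever alias line is left active. A tiny self-contained sketch of that pattern, using toy stand-in classes rather than the rawspeed types:

#include <cassert>

// Toy stand-ins, only to illustrate the alias-based selection pattern.
struct TableLUT    { int decodeNext() const { return 1; } };
struct TableVector { int decodeNext() const { return 2; } };

// Exactly one alias is active, mirroring how HuffmanTable.h selects HuffmanTableLUT.
using Table = TableLUT;
// using Table = TableVector;

int main() {
  Table t;
  // Callers are written against "Table" only; switching the alias above
  // requires no change here.
  assert(t.decodeNext() == 1);
}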
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableLUT.h b/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableLUT.h
new file mode 100644
index 000000000..45c7c31c5
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableLUT.h
@@ -0,0 +1,257 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2017 Axel Waggershauser
+ Copyright (C) 2017-2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#pragma once
+
+#include "common/Common.h" // for uint32, ushort16, int32
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "decompressors/AbstractHuffmanTable.h" // for AbstractHuffmanTable
+#include "io/BitStream.h" // for BitStreamTraits
+#include <cassert> // for assert
+#include <cstddef> // for size_t
+#include <memory> // for allocator_traits<>::...
+#include <vector> // for vector
+
+/*
+* The following code is inspired by the IJG JPEG library.
+*
+* Copyright (C) 1991, 1992, Thomas G. Lane.
+* Part of the Independent JPEG Group's software.
+* See the file Copyright for more details.
+*
+* Copyright (c) 1993 Brian C. Smith, The Regents of the University
+* of California
+* All rights reserved.
+*
+* Copyright (c) 1994 Kongji Huang and Brian C. Smith.
+* Cornell University
+* All rights reserved.
+*
+* Permission to use, copy, modify, and distribute this software and its
+* documentation for any purpose, without fee, and without written agreement is
+* hereby granted, provided that the above copyright notice and the following
+* two paragraphs appear in all copies of this software.
+*
+* IN NO EVENT SHALL CORNELL UNIVERSITY BE LIABLE TO ANY PARTY FOR
+* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+* OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF CORNELL
+* UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+* CORNELL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+* AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+* ON AN "AS IS" BASIS, AND CORNELL UNIVERSITY HAS NO OBLIGATION TO
+* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+*/
+
+namespace rawspeed {
+
+class HuffmanTableLUT final : public AbstractHuffmanTable {
+ // private fields calculated from codesPerBits and codeValues
+ // they are index '1' based, so we can directly lookup the value
+ // for code length l without decrementing
+ std::vector<uint32> maxCodeOL; // index is length of code
+ std::vector<ushort16> codeOffsetOL; // index is length of code
+
+ // The code can be compiled with two different decode lookup table layouts.
+ // The idea is that different CPU architectures may perform better with
+ // one or the other, depending on the relative performance of their arithmetic
+ // core vs their memory access. For an Intel Core i7, the big table is better.
+#if 1
+ // lookup table containing 3 fields: payload:16|flag:8|len:8
+ // The payload may be the fully decoded diff or the length of the diff.
+ // The len field contains the number of bits this lookup consumed.
+ // A lookup value of 0 means the code was too big to fit into the table.
+ // The optimal LookupDepth is also likely to depend on the CPU architecture.
+ static constexpr unsigned PayloadShift = 16;
+ static constexpr unsigned FlagMask = 0x100;
+ static constexpr unsigned LenMask = 0xff;
+ static constexpr unsigned LookupDepth = 11;
+ std::vector<int32> decodeLookup;
+#else
+ // lookup table containing 2 fields: payload:4|len:4
+ // the payload is the length of the diff, len is the length of the code
+ static constexpr unsigned LookupDepth = 15;
+ static constexpr unsigned PayloadShift = 4;
+ static constexpr unsigned FlagMask = 0;
+ static constexpr unsigned LenMask = 0x0f;
+ std::vector<uchar8> decodeLookup;
+#endif
+
+ bool fullDecode = true;
+ bool fixDNGBug16 = false;
+
+public:
+ void setup(bool fullDecode_, bool fixDNGBug16_) {
+ this->fullDecode = fullDecode_;
+ this->fixDNGBug16 = fixDNGBug16_;
+
+ assert(!nCodesPerLength.empty());
+ assert(maxCodesCount() > 0);
+
+ unsigned int maxCodeLength = nCodesPerLength.size() - 1U;
+ assert(codeValues.size() == maxCodesCount());
+
+ assert(maxCodePlusDiffLength() <= 32U);
+
+ // Figure C.1: make table of Huffman code length for each symbol
+ // Figure C.2: generate the codes themselves
+ const auto symbols = generateCodeSymbols();
+ assert(symbols.size() == maxCodesCount());
+
+ // Figure F.15: generate decoding tables
+ codeOffsetOL.resize(maxCodeLength + 1UL, 0xFFFF);
+ maxCodeOL.resize(maxCodeLength + 1UL, 0xFFFFFFFF);
+ int code_index = 0;
+ for (unsigned int l = 1U; l <= maxCodeLength; l++) {
+ if (nCodesPerLength[l]) {
+ codeOffsetOL[l] = symbols[code_index].code - code_index;
+ code_index += nCodesPerLength[l];
+ maxCodeOL[l] = symbols[code_index - 1].code;
+ }
+ }
+
+ // Generate lookup table for fast decoding lookup.
+ // See definition of decodeLookup above
+ decodeLookup.resize(1 << LookupDepth);
+ for (size_t i = 0; i < symbols.size(); i++) {
+ uchar8 code_l = symbols[i].code_len;
+ if (code_l > static_cast<int>(LookupDepth))
+ break;
+
+ ushort16 ll = symbols[i].code << (LookupDepth - code_l);
+ ushort16 ul = ll | ((1 << (LookupDepth - code_l)) - 1);
+ ushort16 diff_l = codeValues[i];
+ for (ushort16 c = ll; c <= ul; c++) {
+ if (!(c < decodeLookup.size()))
+ ThrowRDE("Corrupt Huffman");
+
+ if (!FlagMask || !fullDecode || diff_l + code_l > LookupDepth) {
+ // lookup bit depth is too small to fit both the encoded length
+ // and the final difference value.
+ // -> store only the length and do a normal sign extension later
+ decodeLookup[c] = diff_l << PayloadShift | code_l;
+ } else {
+ // diff_l + code_l <= lookupDepth
+ // The table bit depth is large enough to store both.
+ decodeLookup[c] = (code_l + diff_l) | FlagMask;
+
+ if (diff_l) {
+ uint32 diff = (c >> (LookupDepth - code_l - diff_l)) & ((1 << diff_l) - 1);
+ decodeLookup[c] |= static_cast<int32>(
+ static_cast<uint32>(signExtended(diff, diff_l))
+ << PayloadShift);
+ }
+ }
+ }
+ }
+ }
+
+ template<typename BIT_STREAM> inline int decodeLength(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(!fullDecode);
+ return decode<BIT_STREAM, false>(bs);
+ }
+
+ template<typename BIT_STREAM> inline int decodeNext(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(fullDecode);
+ return decode<BIT_STREAM, true>(bs);
+ }
+
+ // The bool template parameter is to enable two versions:
+ // one returning only the length of the diff bits (see Hasselblad),
+ // one to return the fully decoded diff.
+ // All ifs depending on this bool will be optimized out by the compiler.
+ template<typename BIT_STREAM, bool FULL_DECODE> inline int decode(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(FULL_DECODE == fullDecode);
+
+ // 32 is the absolute maximum combined length of code + diff
+ // assertion maxCodePlusDiffLength() <= 32U is already checked in setup()
+ bs.fill(32);
+
+ // for processors supporting bmi2 instructions, using maxCodePlusDiffLength()
+ // might be beneficial
+
+ uint32 code = bs.peekBitsNoFill(LookupDepth);
+ assert(code < decodeLookup.size());
+ auto val = static_cast<unsigned>(decodeLookup[code]);
+ int len = val & LenMask;
+ assert(len >= 0);
+ assert(len <= 16);
+
+ // if the code is invalid (bitstream corrupted) len will be 0
+ bs.skipBitsNoFill(len);
+ if (FULL_DECODE && val & FlagMask) {
+ // if the flag bit is set, the payload is the already sign extended difference
+ return static_cast<int>(val) >> PayloadShift;
+ }
+
+ if (len) {
+ // if the flag bit is not set but len != 0, the payload is the number of bits to sign extend and return
+ const int l_diff = static_cast<int>(val) >> PayloadShift;
+ assert((FULL_DECODE && (len + l_diff <= 32)) || !FULL_DECODE);
+ if (FULL_DECODE && l_diff == 16) {
+ if (fixDNGBug16)
+ bs.skipBits(16);
+ return -32768;
+ }
+ return FULL_DECODE ? signExtended(bs.getBitsNoFill(l_diff), l_diff) : l_diff;
+ }
+
+ uint32 code_l = LookupDepth;
+ bs.skipBitsNoFill(code_l);
+ while (code_l < maxCodeOL.size() &&
+ (0xFFFFFFFF == maxCodeOL[code_l] || code > maxCodeOL[code_l])) {
+ uint32 temp = bs.getBitsNoFill(1);
+ code = (code << 1) | temp;
+ code_l++;
+ }
+
+ if (code_l >= maxCodeOL.size() ||
+ (0xFFFFFFFF == maxCodeOL[code_l] || code > maxCodeOL[code_l]))
+ ThrowRDE("bad Huffman code: %u (len: %u)", code, code_l);
+
+ if (code < codeOffsetOL[code_l])
+ ThrowRDE("likely corrupt Huffman code: %u (len: %u)", code, code_l);
+
+ int diff_l = codeValues[code - codeOffsetOL[code_l]];
+
+ if (!FULL_DECODE)
+ return diff_l;
+
+ if (diff_l == 16) {
+ if (fixDNGBug16)
+ bs.skipBits(16);
+ return -32768;
+ }
+
+ assert(FULL_DECODE);
+ assert((diff_l && (len + code_l + diff_l <= 32)) || !diff_l);
+ return diff_l ? signExtended(bs.getBitsNoFill(diff_l), diff_l) : 0;
+ }
+};
+
+} // namespace rawspeed
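As a rough standalone sketch of the packed decodeLookup entry layout described in the header above (payload:16|flag:8|len:8, with PayloadShift = 16, FlagMask = 0x100 and LenMask = 0xff); the helper name packEntry is invented here, and the negative-payload case is left out for brevity:

#include <cassert>
#include <cstdint>

constexpr unsigned PayloadShift = 16;
constexpr unsigned FlagMask = 0x100;
constexpr unsigned LenMask = 0xff;

// Pack an already-decoded (non-negative) diff together with the total number
// of bits that the lookup consumes for it.
uint32_t packEntry(uint16_t payload, uint8_t bitsConsumed) {
  return (uint32_t(payload) << PayloadShift) | FlagMask | bitsConsumed;
}

int main() {
  const uint32_t e = packEntry(100, 7);
  assert((e & LenMask) == 7);         // bits to skip in the bit stream
  assert((e & FlagMask) != 0);        // flag set: payload is the final diff
  assert((e >> PayloadShift) == 100); // the diff value itself
}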
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableLookup.h b/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableLookup.h
new file mode 100644
index 000000000..df8408ccf
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableLookup.h
@@ -0,0 +1,171 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2017 Axel Waggershauser
+ Copyright (C) 2017-2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#pragma once
+
+#include "common/Common.h" // for uint32, ushort16
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "decompressors/AbstractHuffmanTable.h" // for AbstractHuffmanTable
+#include "io/BitStream.h" // for BitStreamTraits
+#include <cassert> // for assert
+#include <memory> // for allocator_traits<>::...
+#include <vector> // for vector
+
+/*
+ * The following code is inspired by the IJG JPEG library.
+ *
+ * Copyright (C) 1991, 1992, Thomas G. Lane.
+ * Part of the Independent JPEG Group's software.
+ * See the file Copyright for more details.
+ *
+ * Copyright (c) 1993 Brian C. Smith, The Regents of the University
+ * of California
+ * All rights reserved.
+ *
+ * Copyright (c) 1994 Kongji Huang and Brian C. Smith.
+ * Cornell University
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation for any purpose, without fee, and without written agreement is
+ * hereby granted, provided that the above copyright notice and the following
+ * two paragraphs appear in all copies of this software.
+ *
+ * IN NO EVENT SHALL CORNELL UNIVERSITY BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+ * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF CORNELL
+ * UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * CORNELL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND CORNELL UNIVERSITY HAS NO OBLIGATION TO
+ * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ */
+
+namespace rawspeed {
+
+class HuffmanTableLookup final : public AbstractHuffmanTable {
+ // private fields calculated from codesPerBits and codeValues
+ // they are index '1' based, so we can directly lookup the value
+ // for code length l without decrementing
+ std::vector<uint32> maxCodeOL; // index is length of code
+ std::vector<ushort16> codeOffsetOL; // index is length of code
+
+ bool fullDecode = true;
+ bool fixDNGBug16 = false;
+
+public:
+ void setup(bool fullDecode_, bool fixDNGBug16_) {
+ this->fullDecode = fullDecode_;
+ this->fixDNGBug16 = fixDNGBug16_;
+
+ assert(!nCodesPerLength.empty());
+ assert(maxCodesCount() > 0);
+
+ unsigned int maxCodeLength = nCodesPerLength.size() - 1U;
+ assert(codeValues.size() == maxCodesCount());
+
+ assert(maxCodePlusDiffLength() <= 32U);
+
+ // Figure C.1: make table of Huffman code length for each symbol
+ // Figure C.2: generate the codes themselves
+ const auto symbols = generateCodeSymbols();
+ assert(symbols.size() == maxCodesCount());
+
+ // Figure F.15: generate decoding tables
+ codeOffsetOL.resize(maxCodeLength + 1UL, 0xFFFF);
+ maxCodeOL.resize(maxCodeLength + 1UL, 0xFFFFFFFF);
+ int code_index = 0;
+ for (unsigned int l = 1U; l <= maxCodeLength; l++) {
+ if (nCodesPerLength[l]) {
+ codeOffsetOL[l] = symbols[code_index].code - code_index;
+ code_index += nCodesPerLength[l];
+ maxCodeOL[l] = symbols[code_index - 1].code;
+ }
+ }
+ }
+
+ template <typename BIT_STREAM> inline int decodeLength(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(!fullDecode);
+ return decode<BIT_STREAM, false>(bs);
+ }
+
+ template <typename BIT_STREAM> inline int decodeNext(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(fullDecode);
+ return decode<BIT_STREAM, true>(bs);
+ }
+
+ // The bool template parameter is to enable two versions:
+ // one returning only the length of the diff bits (see Hasselblad),
+ // one to return the fully decoded diff.
+ // All ifs depending on this bool will be optimized out by the compiler.
+ template <typename BIT_STREAM, bool FULL_DECODE>
+ inline int decode(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(FULL_DECODE == fullDecode);
+
+ // 32 is the absolute maximum combined length of code + diff
+ // assertion maxCodePlusDiffLength() <= 32U is already checked in setup()
+ bs.fill(32);
+
+ // for processors supporting bmi2 instructions, using
+ // maxCodePlusDiffLength() might be beneficial
+
+ uint32 code = 0;
+ uint32 code_l = 0;
+ while (code_l < maxCodeOL.size() &&
+ (0xFFFFFFFF == maxCodeOL[code_l] || code > maxCodeOL[code_l])) {
+ uint32 temp = bs.getBitsNoFill(1);
+ code = (code << 1) | temp;
+ code_l++;
+ }
+
+ if (code_l >= maxCodeOL.size() ||
+ (0xFFFFFFFF == maxCodeOL[code_l] || code > maxCodeOL[code_l]))
+ ThrowRDE("bad Huffman code: %u (len: %u)", code, code_l);
+
+ if (code < codeOffsetOL[code_l])
+ ThrowRDE("likely corrupt Huffman code: %u (len: %u)", code, code_l);
+
+ int diff_l = codeValues[code - codeOffsetOL[code_l]];
+
+ if (!FULL_DECODE)
+ return diff_l;
+
+ if (diff_l == 16) {
+ if (fixDNGBug16)
+ bs.skipBits(16);
+ return -32768;
+ }
+
+ assert(FULL_DECODE);
+ assert((diff_l && (code_l + diff_l <= 32)) || !diff_l);
+ return diff_l ? signExtended(bs.getBitsNoFill(diff_l), diff_l) : 0;
+ }
+};
+
+} // namespace rawspeed
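All of these tables ultimately call signExtended() to turn the raw diff bits into a signed value; the rule is the classic JPEG extend step: if the top bit of the len-bit field is clear, subtract (1 << len) - 1. A self-contained restatement with two worked values, assuming only that behaviour:

#include <cassert>
#include <cstdint>

// Standalone restatement of the JPEG-style sign extension the tables rely on.
int signExtended(uint32_t diff, uint32_t len) {
  int ret = diff;
  if ((diff & (1u << (len - 1))) == 0) // top bit clear: encoded value is negative
    ret -= (1 << len) - 1;
  return ret;
}

int main() {
  assert(signExtended(0b101, 3) == 5);  // top bit set, value stays positive
  assert(signExtended(0b010, 3) == -5); // top bit clear: 2 - 7 = -5
}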
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableTree.h b/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableTree.h
new file mode 100644
index 000000000..0dd0088e4
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableTree.h
@@ -0,0 +1,165 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2017 Axel Waggershauser
+ Copyright (C) 2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#pragma once
+
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "decompressors/AbstractHuffmanTable.h" // for AbstractHuffmanTable...
+#include "decompressors/BinaryHuffmanTree.h" // IWYU pragma: export
+#include "io/BitStream.h" // for BitStreamTraits
+#include <algorithm> // for for_each
+#include <cassert> // for assert
+#include <initializer_list> // for initializer_list
+#include <iterator> // for advance, next
+#include <memory> // for unique_ptr, make_unique
+#include <vector> // for vector, vector<>::co...
+
+namespace rawspeed {
+
+class HuffmanTableTree final : public AbstractHuffmanTable {
+ using ValueType = decltype(codeValues)::value_type;
+
+ BinaryHuffmanTree<ValueType> tree;
+
+ bool fullDecode = true;
+ bool fixDNGBug16 = false;
+
+protected:
+ template <typename BIT_STREAM>
+ inline ValueType getValue(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ CodeSymbol partial;
+
+ const auto* top = &(tree.root->getAsBranch());
+
+ // Read bits until we either find the code or detect an incorrect code
+ for (partial.code = 0, partial.code_len = 1;; ++partial.code_len) {
+ assert(partial.code_len <= 16);
+
+ // Read one more bit
+ const bool bit = bs.getBits(1);
+
+ partial.code <<= 1;
+ partial.code |= bit;
+
+ // What is the last bit, which we have just read?
+
+ // NOTE: The order *IS* important! Left to right, zero to one!
+ const auto& newNode = !bit ? top->zero : top->one;
+
+ if (!newNode) {
+ // Got nothing in this direction.
+ ThrowRDE("bad Huffman code: %u (len: %u)", partial.code,
+ partial.code_len);
+ }
+
+ if (static_cast<decltype(tree)::Node::Type>(*newNode) ==
+ decltype(tree)::Node::Type::Leaf) {
+ // Ok, great, hit a Leaf. This is it.
+ return newNode->getAsLeaf().value;
+ }
+
+ // Else, this is a branch, continue looking.
+ top = &(newNode->getAsBranch());
+ }
+
+ // We have either returned the found symbol, or thrown on an incorrect symbol.
+ __builtin_unreachable();
+ }
+
+public:
+ void setup(bool fullDecode_, bool fixDNGBug16_) {
+ this->fullDecode = fullDecode_;
+ this->fixDNGBug16 = fixDNGBug16_;
+
+ assert(!nCodesPerLength.empty());
+ assert(maxCodesCount() > 0);
+ assert(codeValues.size() == maxCodesCount());
+
+ auto currValue = codeValues.cbegin();
+ for (auto codeLen = 1UL; codeLen < nCodesPerLength.size(); codeLen++) {
+ const auto nCodesForCurrLen = nCodesPerLength[codeLen];
+
+ auto nodes = tree.getAllVacantNodesAtDepth(codeLen);
+ if (nodes.size() < nCodesForCurrLen) {
+ ThrowRDE("Got too many (%u) codes for len %lu, can only have %zu codes",
+ nCodesForCurrLen, codeLen, nodes.size());
+ }
+
+ // Make first nCodesForCurrLen nodes Leafs
+ std::for_each(nodes.cbegin(), std::next(nodes.cbegin(), nCodesForCurrLen),
+ [&currValue](auto* node) {
+ *node =
+ std::make_unique<decltype(tree)::Leaf>(*currValue);
+ std::advance(currValue, 1);
+ });
+ }
+
+ assert(codeValues.cend() == currValue);
+
+ // And get rid of all the branches that do not lead to Leafs.
+ // It is crucial to detect degenerate codes as early as possible.
+ tree.pruneLeaflessBranches();
+ }
+
+ template <typename BIT_STREAM> inline int decodeLength(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(!fullDecode);
+ return decode<BIT_STREAM, false>(bs);
+ }
+
+ template <typename BIT_STREAM> inline int decodeNext(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(fullDecode);
+ return decode<BIT_STREAM, true>(bs);
+ }
+
+ // The bool template parameter is to enable two versions:
+ // one returning only the length of the diff bits (see Hasselblad),
+ // one to return the fully decoded diff.
+ // All ifs depending on this bool will be optimized out by the compiler.
+ template <typename BIT_STREAM, bool FULL_DECODE>
+ inline int decode(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(FULL_DECODE == fullDecode);
+
+ const auto codeValue = getValue(bs);
+
+ const int diff_l = codeValue;
+
+ if (!FULL_DECODE)
+ return diff_l;
+
+ if (diff_l == 16) {
+ if (fixDNGBug16)
+ bs.skipBits(16);
+ return -32768;
+ }
+
+ return diff_l ? signExtended(bs.getBits(diff_l), diff_l) : 0;
+ }
+};
+
+} // namespace rawspeed
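HuffmanTableTree decodes by walking a binary tree one input bit at a time: a 0 bit follows the zero child, a 1 bit the one child, and reaching a leaf yields the code value. A throwaway illustration of that traversal idea only (this is not the BinaryHuffmanTree API, just a toy node struct):

#include <cassert>
#include <memory>

// Throwaway trie node, only to illustrate bit-by-bit decoding.
struct Node {
  int value = -1; // >= 0 marks a leaf
  std::unique_ptr<Node> zero, one;
};

// Decode one symbol by following bits until a leaf is reached.
int decodeOne(const Node& root, const bool* bits, int nBits, int* consumed) {
  const Node* cur = &root;
  for (int i = 0; i < nBits; ++i) {
    cur = bits[i] ? cur->one.get() : cur->zero.get();
    if (!cur)
      return -1; // invalid code in this toy
    if (cur->value >= 0) {
      *consumed = i + 1;
      return cur->value;
    }
  }
  return -1;
}

int main() {
  // Codes: "0" -> value 7, "10" -> value 3.
  Node root;
  root.zero = std::make_unique<Node>();
  root.zero->value = 7;
  root.one = std::make_unique<Node>();
  root.one->zero = std::make_unique<Node>();
  root.one->zero->value = 3;

  const bool stream[] = {true, false, false}; // "10" followed by "0"
  int used = 0;
  assert(decodeOne(root, stream, 3, &used) == 3 && used == 2);
  assert(decodeOne(root, stream + 2, 1, &used) == 7 && used == 1);
}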
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableVector.h b/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableVector.h
new file mode 100644
index 000000000..3a1c8824a
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/HuffmanTableVector.h
@@ -0,0 +1,155 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2017 Axel Waggershauser
+ Copyright (C) 2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#pragma once
+
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "decompressors/AbstractHuffmanTable.h" // for AbstractHuffmanTable...
+#include "io/BitStream.h" // for BitStreamTraits
+#include <cassert> // for assert
+#include <utility> // for make_pair, pair
+#include <vector> // for vector
+
+namespace rawspeed {
+
+class HuffmanTableVector final : public AbstractHuffmanTable {
+ std::vector<CodeSymbol> symbols;
+
+ bool fullDecode = true;
+ bool fixDNGBug16 = false;
+
+ // Given this code length, what is the minimal code id?
+ std::vector<unsigned int> extrCodeIdForLen; // index is length of code
+
+protected:
+ template <typename BIT_STREAM>
+ inline std::pair<CodeSymbol, unsigned> getSymbol(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+
+ CodeSymbol partial;
+ unsigned long codeId;
+
+ // Read bits until we either find the code or detect an incorrect code
+ for (partial.code = 0, partial.code_len = 1;; ++partial.code_len) {
+ assert(partial.code_len <= 16);
+
+ // Read one more bit
+ const bool bit = bs.getBits(1);
+
+ partial.code <<= 1;
+ partial.code |= bit;
+
+ // Given global ordering and the code length, we know the code id range.
+ for (codeId = extrCodeIdForLen[partial.code_len];
+ codeId < extrCodeIdForLen[1U + partial.code_len]; codeId++) {
+ const CodeSymbol& symbol = symbols[codeId];
+ if (symbol == partial) // yay, found?
+ return std::make_pair(symbol, codeId);
+ }
+
+ // Ok, but does any symbol have this same prefix?
+ bool haveCommonPrefix = false;
+ for (; codeId < symbols.size(); codeId++) {
+ const CodeSymbol& symbol = symbols[codeId];
+ haveCommonPrefix |= CodeSymbol::HaveCommonPrefix(symbol, partial);
+ if (haveCommonPrefix)
+ break;
+ }
+
+ // If no symbols have this prefix, then the code is invalid.
+ if (!haveCommonPrefix) {
+ ThrowRDE("bad Huffman code: %u (len: %u)", partial.code,
+ partial.code_len);
+ }
+ }
+
+ // We have either returned the found symbol, or thrown on an incorrect symbol.
+ __builtin_unreachable();
+ }
+
+public:
+ void setup(bool fullDecode_, bool fixDNGBug16_) {
+ this->fullDecode = fullDecode_;
+ this->fixDNGBug16 = fixDNGBug16_;
+
+ assert(!nCodesPerLength.empty());
+ assert(maxCodesCount() > 0);
+ assert(codeValues.size() == maxCodesCount());
+
+ // Figure C.1: make table of Huffman code length for each symbol
+ // Figure C.2: generate the codes themselves
+ symbols = generateCodeSymbols();
+ assert(symbols.size() == maxCodesCount());
+
+ extrCodeIdForLen.reserve(1U + nCodesPerLength.size());
+ extrCodeIdForLen.resize(2); // for len 0 and 1, the min code id is always 0
+ for (auto codeLen = 1UL; codeLen < nCodesPerLength.size(); codeLen++) {
+ auto minCodeId = extrCodeIdForLen.back();
+ minCodeId += nCodesPerLength[codeLen];
+ extrCodeIdForLen.emplace_back(minCodeId);
+ }
+ assert(extrCodeIdForLen.size() == 1U + nCodesPerLength.size());
+ }
+
+ template <typename BIT_STREAM> inline int decodeLength(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(!fullDecode);
+ return decode<BIT_STREAM, false>(bs);
+ }
+
+ template <typename BIT_STREAM> inline int decodeNext(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(fullDecode);
+ return decode<BIT_STREAM, true>(bs);
+ }
+
+ // The bool template parameter is to enable two versions:
+ // one returning only the length of the diff bits (see Hasselblad),
+ // one to return the fully decoded diff.
+ // All ifs depending on this bool will be optimized out by the compiler.
+ template <typename BIT_STREAM, bool FULL_DECODE>
+ inline int decode(BIT_STREAM& bs) const {
+ static_assert(BitStreamTraits<BIT_STREAM>::canUseWithHuffmanTable,
+ "This BitStream specialization is not marked as usable here");
+ assert(FULL_DECODE == fullDecode);
+
+ const auto got = getSymbol(bs);
+ const unsigned codeId = got.second;
+
+ const int diff_l = codeValues[codeId];
+
+ if (!FULL_DECODE)
+ return diff_l;
+
+ if (diff_l == 16) {
+ if (fixDNGBug16)
+ bs.skipBits(16);
+ return -32768;
+ }
+
+ return diff_l ? signExtended(bs.getBits(diff_l), diff_l) : 0;
+ }
+};
+
+} // namespace rawspeed
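The setup() above reduces nCodesPerLength to extrCodeIdForLen, a running sum that gives, for each code length, the id of the first symbol of that length; getSymbol() then only scans the symbols within [extrCodeIdForLen[len], extrCodeIdForLen[len + 1]). A minimal sketch of just that bookkeeping, using a made-up count table:

#include <cassert>
#include <vector>

int main() {
  // Hypothetical counts: index is code length; 0 codes of length 0,
  // 1 code of length 1, 2 codes of length 2, 1 code of length 3.
  std::vector<unsigned> nCodesPerLength = {0, 1, 2, 1};

  // For lengths 0 and 1 the minimal code id is always 0; then accumulate.
  std::vector<unsigned> extrCodeIdForLen(2, 0);
  for (size_t codeLen = 1; codeLen < nCodesPerLength.size(); codeLen++)
    extrCodeIdForLen.push_back(extrCodeIdForLen.back() + nCodesPerLength[codeLen]);

  // Symbol ids: the length-1 code is id 0, the length-2 codes are ids 1..2,
  // the length-3 code is id 3.
  assert((extrCodeIdForLen == std::vector<unsigned>{0, 0, 1, 3, 4}));
}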
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/JpegDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/JpegDecompressor.cpp
index 63d56b50d..13fabb6cb 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/JpegDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/JpegDecompressor.cpp
@@ -96,9 +96,10 @@ static void jpeg_mem_src_int(j_decompress_ptr cinfo,
#endif
[[noreturn]] METHODDEF(void) my_error_throw(j_common_ptr cinfo) {
- char buf[JMSG_LENGTH_MAX] = {0};
- (*cinfo->err->format_message)(cinfo, buf);
- ThrowRDE("JPEG decoder error: %s", buf);
+ std::array<char, JMSG_LENGTH_MAX> buf;
+ buf.fill(0);
+ (*cinfo->err->format_message)(cinfo, buf.data());
+ ThrowRDE("JPEG decoder error: %s", buf.data());
}
struct JpegDecompressor::JpegDecompressStruct : jpeg_decompress_struct {
@@ -130,9 +131,11 @@ void JpegDecompressor::decode(uint32 offX,
ThrowRDE("Component count doesn't match");
int row_stride = dinfo.output_width * dinfo.output_components;
- unique_ptr<uchar8[], decltype(&alignedFree)> complete_buffer(
- alignedMallocArray<uchar8, 16>(dinfo.output_height, row_stride),
- &alignedFree);
+ unique_ptr<uchar8[], // NOLINT
+ decltype(&alignedFree)>
+ complete_buffer(
+ alignedMallocArray<uchar8, 16>(dinfo.output_height, row_stride),
+ &alignedFree);
while (dinfo.output_scanline < dinfo.output_height) {
buffer[0] = static_cast<JSAMPROW>(
&complete_buffer[static_cast<size_t>(dinfo.output_scanline) *
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/JpegDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/JpegDecompressor.h
index ed1ddfd27..114c6ed89 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/JpegDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/JpegDecompressor.h
@@ -27,9 +27,8 @@
#include "common/Common.h" // for uint32
#include "common/RawImage.h" // for RawImage
#include "decompressors/AbstractDecompressor.h" // for AbstractDecompressor
-#include "io/Buffer.h" // for Buffer, Buffer::size_type
#include "io/ByteStream.h" // for ByteStream
-#include "io/Endianness.h" // for getHostEndianness
+#include "io/Endianness.h" // for Endianness, Endianne...
#include <utility> // for move
namespace rawspeed {
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/KodakDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/KodakDecompressor.cpp
index 81388f8e3..7d19a5603 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/KodakDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/KodakDecompressor.cpp
@@ -20,23 +20,24 @@
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "rawspeedconfig.h"
#include "decompressors/KodakDecompressor.h"
-#include "common/RawImage.h" // for RawImage
-#include "decoders/RawDecoderException.h" // for RawDecoderException (ptr o...
+#include "common/Point.h" // for iPoint2D
+#include "common/RawImage.h" // for RawImage, RawImageData
+#include "decoders/RawDecoderException.h" // for ThrowRDE
#include "decompressors/HuffmanTable.h" // for HuffmanTable
#include "io/ByteStream.h" // for ByteStream
#include <algorithm> // for min
#include <array> // for array
#include <cassert> // for assert
+#include <utility> // for move
namespace rawspeed {
constexpr int KodakDecompressor::segment_size;
KodakDecompressor::KodakDecompressor(const RawImage& img, ByteStream bs,
- bool uncorrectedRawValues_)
- : mRaw(img), input(std::move(bs)),
+ int bps_, bool uncorrectedRawValues_)
+ : mRaw(img), input(std::move(bs)), bps(bps_),
uncorrectedRawValues(uncorrectedRawValues_) {
if (mRaw->getCpp() != 1 || mRaw->getDataType() != TYPE_USHORT16 ||
mRaw->getBpp() != 2)
@@ -47,6 +48,9 @@ KodakDecompressor::KodakDecompressor(const RawImage& img, ByteStream bs,
ThrowRDE("Unexpected image dimensions found: (%u; %u)", mRaw->dim.x,
mRaw->dim.y);
+ if (bps != 10 && bps != 12)
+ ThrowRDE("Unexpected bits per sample: %i", bps);
+
// Lower estimate: this decompressor requires *at least* half a byte
// per output pixel
input.check(mRaw->dim.area() / 2ULL);
@@ -83,6 +87,7 @@ KodakDecompressor::decodeSegment(const uint32 bsize) {
}
for (uint32 i = 0; i < bsize; i++) {
uint32 len = blen[i];
+ assert(len < 16);
if (bits < len) {
for (uint32 j = 0; j < 32; j += 8) {
@@ -95,9 +100,8 @@ KodakDecompressor::decodeSegment(const uint32 bsize) {
uint32 diff = static_cast<uint32>(bitbuf) & (0xffff >> (16 - len));
bitbuf >>= len;
bits -= len;
- diff = len != 0 ? HuffmanTable::signExtended(diff, len) : diff;
- out[i] = diff;
+ out[i] = len != 0 ? HuffmanTable::signExtended(diff, len) : int(diff);
}
return out;
@@ -116,15 +120,15 @@ void KodakDecompressor::decompress() {
const segment buf = decodeSegment(len);
- std::array<uint32, 2> pred;
+ std::array<int, 2> pred;
pred.fill(0);
for (uint32 i = 0; i < len; i++) {
pred[i & 1] += buf[i];
- ushort16 value = pred[i & 1];
- if (value > 1023)
- ThrowRDE("Value out of bounds %d", value);
+ int value = pred[i & 1];
+ if (unsigned(value) >= (1U << bps))
+ ThrowRDE("Value out of bounds %d (bps = %i)", value, bps);
if (uncorrectedRawValues)
dest[x + i] = value;
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/KodakDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/KodakDecompressor.h
index 9b89edc12..14bc4f0bf 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/KodakDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/KodakDecompressor.h
@@ -33,15 +33,16 @@ namespace rawspeed {
class KodakDecompressor final : public AbstractDecompressor {
RawImage mRaw;
ByteStream input;
+ int bps;
bool uncorrectedRawValues;
static constexpr int segment_size = 256; // pixels
- using segment = std::array<ushort16, segment_size>;
+ using segment = std::array<short16, segment_size>;
segment decodeSegment(uint32 bsize);
public:
- KodakDecompressor(const RawImage& img, ByteStream bs,
+ KodakDecompressor(const RawImage& img, ByteStream bs, int bps,
bool uncorrectedRawValues_);
void decompress();
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/LJpegDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/LJpegDecompressor.cpp
index fc037af73..69572bf16 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/LJpegDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/LJpegDecompressor.cpp
@@ -20,12 +20,13 @@
*/
#include "decompressors/LJpegDecompressor.h"
-#include "common/Common.h" // for uint32, unroll_loop, ushort16
+#include "common/Common.h" // for unroll_loop, uint32, ushort16
#include "common/Point.h" // for iPoint2D
#include "common/RawImage.h" // for RawImage, RawImageData
#include "decoders/RawDecoderException.h" // for ThrowRDE
-#include "io/BitPumpJPEG.h" // for BitPumpJPEG
-#include <algorithm> // for min, copy_n
+#include "io/BitPumpJPEG.h" // for BitPumpJPEG, BitStream<>::...
+#include <algorithm> // for copy_n
+#include <cassert> // for assert
using std::copy_n;
@@ -94,38 +95,62 @@ void LJpegDecompressor::decodeScan()
if ((mRaw->getCpp() * (mRaw->dim.x - offX)) < frame.cps)
ThrowRDE("Got less pixels than the components per sample");
- const auto tilePixelBlocks = mRaw->getCpp() * w;
- if (tilePixelBlocks % frame.cps != 0) {
- ThrowRDE("Tile component width (%u) is not multiple of LJpeg CPS (%u)",
- tilePixelBlocks, frame.cps);
- }
+ // How many output pixels are we expected to produce, as per DNG tiling?
+ const auto tileRequiredWidth = mRaw->getCpp() * w;
- wBlocks = tilePixelBlocks / frame.cps;
- if (frame.w < wBlocks || frame.h < h) {
+ // How many full pixel blocks do we need to consume for that?
+ const auto blocksToConsume = roundUpDivision(tileRequiredWidth, frame.cps);
+ if (frame.w < blocksToConsume || frame.h < h) {
ThrowRDE("LJpeg frame (%u, %u) is smaller than expected (%u, %u)",
- frame.cps * frame.w, frame.h, tilePixelBlocks, h);
+ frame.cps * frame.w, frame.h, tileRequiredWidth, h);
}
- switch (frame.cps) {
- case 2:
- decodeN<2>();
- break;
- case 3:
- decodeN<3>();
- break;
- case 4:
- decodeN<4>();
- break;
- default:
- ThrowRDE("Unsupported number of components: %u", frame.cps);
+ // How many full pixel blocks will we produce?
+ fullBlocks = tileRequiredWidth / frame.cps; // Truncating division!
+ // Do we need to also produce part of a block?
+ trailingPixels = tileRequiredWidth % frame.cps;
+
+ if (trailingPixels == 0) {
+ switch (frame.cps) {
+ case 1:
+ decodeN<1>();
+ break;
+ case 2:
+ decodeN<2>();
+ break;
+ case 3:
+ decodeN<3>();
+ break;
+ case 4:
+ decodeN<4>();
+ break;
+ default:
+ ThrowRDE("Unsupported number of components: %u", frame.cps);
+ }
+ } else /* trailingPixels != 0 */ {
+ // FIXME: using different function just for one tile likely causes
+ // i-cache misses and whatnot. Need to check how not splitting it into
+ // two different functions affects performance of the normal case.
+ switch (frame.cps) {
+ // Naturally can't happen for CPS=1.
+ case 2:
+ decodeN<2, /*WeirdWidth=*/true>();
+ break;
+ case 3:
+ decodeN<3, /*WeirdWidth=*/true>();
+ break;
+ case 4:
+ decodeN<4, /*WeirdWidth=*/true>();
+ break;
+ default:
+ ThrowRDE("Unsupported number of components: %u", frame.cps);
+ }
}
}
// N_COMP == number of components (2, 3 or 4)
-template <int N_COMP>
-void LJpegDecompressor::decodeN()
-{
+template <int N_COMP, bool WeirdWidth> void LJpegDecompressor::decodeN() {
assert(mRaw->getCpp() > 0);
assert(N_COMP > 0);
assert(N_COMP >= mRaw->getCpp());
@@ -160,14 +185,38 @@ void LJpegDecompressor::decodeN()
// the predictor for the next line is the start of this line
predNext = dest;
- // For x, we first process all pixels within the image buffer ...
- for (unsigned x = 0; x < wBlocks; ++x) {
+ unsigned x = 0;
+
+ // For x, we first process all full pixel blocks within the image buffer ...
+ for (; x < fullBlocks; ++x) {
unroll_loop<N_COMP>([&](int i) {
*dest++ = pred[i] += ht[i]->decodeNext(bitStream);
});
}
+
+ // Sometimes we also need to consume one more block, and produce part of it.
+ if /*constexpr*/ (WeirdWidth) {
+ // FIXME: evaluate i-cache implications due to this being compile-time.
+ static_assert(N_COMP > 1 || !WeirdWidth,
+ "can't want part of 1-pixel-wide block");
+ // Some rather esoteric DNGs have odd dimensions, e.g. width % 2 = 1.
+ // We may end up needing just part of last N_COMP pixels.
+ assert(trailingPixels > 0);
+ assert(trailingPixels < N_COMP);
+ unsigned c = 0;
+ for (; c < trailingPixels; ++c) {
+ *dest++ = pred[c] += ht[c]->decodeNext(bitStream);
+ }
+ // Discard the rest of the block.
+ assert(c < N_COMP);
+ for (; c < N_COMP; ++c) {
+ ht[c]->decodeNext(bitStream);
+ }
+ ++x; // We did just process one more block.
+ }
+
// ... and discard the rest.
- for (unsigned x = wBlocks; x < frame.w; ++x) {
+ for (; x < frame.w; ++x) {
unroll_loop<N_COMP>([&](int i) {
ht[i]->decodeNext(bitStream);
});
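To make the new width handling concrete: the tile width is split into full N_COMP-pixel blocks plus at most one partial block. With a hypothetical 9-pixel-wide, single-cpp tile and frame.cps = 2 (and roundUpDivision assumed to be plain ceiling division), the numbers come out as:

#include <cassert>

// Ceiling division, matching what roundUpDivision is assumed to do here.
unsigned roundUpDivision(unsigned a, unsigned b) { return (a + b - 1) / b; }

int main() {
  const unsigned cpp = 1, w = 9, cps = 2; // hypothetical tile / frame values

  const unsigned tileRequiredWidth = cpp * w;                               // 9 output pixels
  const unsigned blocksToConsume = roundUpDivision(tileRequiredWidth, cps); // 5 blocks read
  const unsigned fullBlocks = tileRequiredWidth / cps;                      // 4 (truncating division)
  const unsigned trailingPixels = tileRequiredWidth % cps;                  // 1 pixel of the 5th block kept

  assert(blocksToConsume == 5 && fullBlocks == 4 && trailingPixels == 1);
}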
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/LJpegDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/LJpegDecompressor.h
index 41e51c1e5..d2783a85b 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/LJpegDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/LJpegDecompressor.h
@@ -22,11 +22,10 @@
#include "common/Common.h" // for uint32
#include "decompressors/AbstractLJpegDecompressor.h" // for AbstractLJpegDe...
-#include "io/Buffer.h" // for Buffer, Buffer:...
-#include "io/ByteStream.h" // for ByteStream
namespace rawspeed {
+class ByteStream;
class RawImage;
// Decompresses Lossless JPEGs, with 2-4 components
@@ -34,20 +33,21 @@ class RawImage;
class LJpegDecompressor final : public AbstractLJpegDecompressor
{
void decodeScan() override;
- template<int N_COMP> void decodeN();
+ template <int N_COMP, bool WeirdWidth = false> void decodeN();
uint32 offX = 0;
uint32 offY = 0;
uint32 w = 0;
uint32 h = 0;
- uint32 wBlocks = 0;
+ uint32 fullBlocks = 0;
+ uint32 trailingPixels = 0;
public:
LJpegDecompressor(const ByteStream& bs, const RawImage& img);
void decode(uint32 offsetX, uint32 offsetY, uint32 width, uint32 height,
- bool fixDng16Bug);
+ bool fixDng16Bug_);
};
} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/NikonDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/NikonDecompressor.cpp
index d8c0d7962..f07a60f81 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/NikonDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/NikonDecompressor.cpp
@@ -19,44 +19,352 @@
*/
#include "decompressors/NikonDecompressor.h"
-#include "common/Common.h" // for uint32, ushort16, clampBits
+#include "common/Common.h" // for uint32, clampBits, ushort16
#include "common/Point.h" // for iPoint2D
-#include "common/RawImage.h" // for RawImage, RawImageData, RawI...
+#include "common/RawImage.h" // for RawImage, RawImageData
#include "decoders/RawDecoderException.h" // for ThrowRDE
#include "decompressors/HuffmanTable.h" // for HuffmanTable
-#include "io/BitPumpMSB.h" // for BitPumpMSB, BitStream<>::fil...
+#include "io/BitPumpMSB.h" // for BitPumpMSB, BitStream<>::f...
#include "io/Buffer.h" // for Buffer
#include "io/ByteStream.h" // for ByteStream
-#include <cstdio> // for size_t, NULL
-#include <vector> // for vector, allocator
+#include <cassert> // for assert
+#include <cstdio> // for size_t
+#include <vector> // for vector
namespace rawspeed {
-const uchar8 NikonDecompressor::nikon_tree[][2][16] = {
- {/* 12-bit lossy */
- {0, 1, 5, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0},
- {5, 4, 3, 6, 2, 7, 1, 0, 8, 9, 11, 10, 12}},
- {/* 12-bit lossy after split */
- {0, 1, 5, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0},
- {0x39, 0x5a, 0x38, 0x27, 0x16, 5, 4, 3, 2, 1, 0, 11, 12, 12}},
- {/* 12-bit lossless */
- {0, 1, 4, 2, 3, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {5, 4, 6, 3, 7, 2, 8, 1, 9, 0, 10, 11, 12}},
- {/* 14-bit lossy */
- {0, 1, 4, 3, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0},
- {5, 6, 4, 7, 8, 3, 9, 2, 1, 0, 10, 11, 12, 13, 14}},
- {/* 14-bit lossy after split */
- {0, 1, 5, 1, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0},
- {8, 0x5c, 0x4b, 0x3a, 0x29, 7, 6, 5, 4, 3, 2, 1, 0, 13, 14}},
- {/* 14-bit lossless */
- {0, 1, 4, 2, 2, 3, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0},
- {7, 6, 8, 5, 9, 4, 10, 3, 11, 12, 2, 0, 1, 13, 14}},
+const std::array<std::array<std::array<uchar8, 16>, 2>, 6>
+ NikonDecompressor::nikon_tree = {{
+ {{/* 12-bit lossy */
+ {0, 1, 5, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0},
+ {5, 4, 3, 6, 2, 7, 1, 0, 8, 9, 11, 10, 12}}},
+ {{/* 12-bit lossy after split */
+ {0, 1, 5, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0},
+ {0x39, 0x5a, 0x38, 0x27, 0x16, 5, 4, 3, 2, 1, 0, 11, 12, 12}}},
+ {{/* 12-bit lossless */
+ {0, 1, 4, 2, 3, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {5, 4, 6, 3, 7, 2, 8, 1, 9, 0, 10, 11, 12}}},
+ {{/* 14-bit lossy */
+ {0, 1, 4, 3, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0},
+ {5, 6, 4, 7, 8, 3, 9, 2, 1, 0, 10, 11, 12, 13, 14}}},
+ {{/* 14-bit lossy after split */
+ {0, 1, 5, 1, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0},
+ {8, 0x5c, 0x4b, 0x3a, 0x29, 7, 6, 5, 4, 3, 2, 1, 0, 13, 14}}},
+ {{/* 14-bit lossless */
+ {0, 1, 4, 2, 2, 3, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0},
+ {7, 6, 8, 5, 9, 4, 10, 3, 11, 12, 2, 0, 1, 13, 14}}},
+ }};
+
+namespace {
+
+const std::array<uint32, 32> bitMask = {
+ {0xffffffff, 0x7fffffff, 0x3fffffff, 0x1fffffff, 0x0fffffff, 0x07ffffff,
+ 0x03ffffff, 0x01ffffff, 0x00ffffff, 0x007fffff, 0x003fffff, 0x001fffff,
+ 0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+ 0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff,
+ 0x000000ff, 0x0000007f, 0x0000003f, 0x0000001f, 0x0000000f, 0x00000007,
+ 0x00000003, 0x00000001}};
+
+class NikonLASDecompressor {
+ bool mUseBigtable = true;
+ bool mDNGCompatible = false;
+
+ struct HuffmanTable {
+ /*
+ * These two fields directly represent the contents of a JPEG DHT
+ * marker
+ */
+ std::array<uint32, 17> bits;
+ std::array<uint32, 256> huffval;
+
+ /*
+ * The remaining fields are computed from the above to allow more
+ * efficient coding and decoding. These fields should be considered
+ * private to the Huffman compression & decompression modules.
+ */
+
+ std::array<ushort16, 17> mincode;
+ std::array<int, 18> maxcode;
+ std::array<short, 17> valptr;
+ std::array<uint32, 256> numbits;
+ std::vector<int> bigTable;
+ bool initialized;
+ } dctbl1;
+
+ void createHuffmanTable() {
+ int p;
+ int i;
+ int l;
+ int lastp;
+ int si;
+ std::array<char, 257> huffsize;
+ std::array<ushort16, 257> huffcode;
+ ushort16 code;
+ int size;
+ int value;
+ int ll;
+ int ul;
+
+ /*
+ * Figure C.1: make table of Huffman code length for each symbol
+ * Note that this is in code-length order.
+ */
+ p = 0;
+ for (l = 1; l <= 16; l++) {
+ for (i = 1; i <= static_cast<int>(dctbl1.bits[l]); i++) {
+ huffsize[p++] = static_cast<char>(l);
+ if (p > 256)
+ ThrowRDE("LJpegDecompressor::createHuffmanTable: Code length too "
+ "long. Corrupt data.");
+ }
+ }
+ huffsize[p] = 0;
+ lastp = p;
+
+ /*
+ * Figure C.2: generate the codes themselves
+ * Note that this is in code-length order.
+ */
+ code = 0;
+ si = huffsize[0];
+ p = 0;
+ while (huffsize[p]) {
+ while ((static_cast<int>(huffsize[p])) == si) {
+ huffcode[p++] = code;
+ code++;
+ }
+ code <<= 1;
+ si++;
+ if (p > 256)
+ ThrowRDE("createHuffmanTable: Code length too long. Corrupt data.");
+ }
+
+ /*
+ * Figure F.15: generate decoding tables
+ */
+ dctbl1.mincode[0] = 0;
+ dctbl1.maxcode[0] = 0;
+ p = 0;
+ for (l = 1; l <= 16; l++) {
+ if (dctbl1.bits[l]) {
+ dctbl1.valptr[l] = p;
+ dctbl1.mincode[l] = huffcode[p];
+ p += dctbl1.bits[l];
+ dctbl1.maxcode[l] = huffcode[p - 1];
+ } else {
+ dctbl1.valptr[l] =
+ 0xff; // This check must be present to avoid crash on junk
+ dctbl1.maxcode[l] = -1;
+ }
+ if (p > 256)
+ ThrowRDE("createHuffmanTable: Code length too long. Corrupt data.");
+ }
+
+ /*
+ * We put in this value to ensure HuffDecode terminates.
+ */
+ dctbl1.maxcode[17] = 0xFFFFFL;
+
+ /*
+ * Build the numbits, value lookup tables.
+ * These tables allow us to gather 8 bits from the bit stream,
+ * and immediately look up the size and value of the huffman codes.
+ * If size is zero, it means that more than 8 bits are in the huffman
+ * code (this happens about 3-4% of the time).
+ */
+ dctbl1.numbits.fill(0);
+ for (p = 0; p < lastp; p++) {
+ size = huffsize[p];
+ if (size <= 8) {
+ value = dctbl1.huffval[p];
+ code = huffcode[p];
+ ll = code << (8 - size);
+ if (size < 8) {
+ ul = ll | bitMask[24 + size];
+ } else {
+ ul = ll;
+ }
+ if (ul > 256 || ll > ul)
+ ThrowRDE("createHuffmanTable: Code length too long. Corrupt data.");
+ for (i = ll; i <= ul; i++) {
+ dctbl1.numbits[i] = size | (value << 4);
+ }
+ }
+ }
+ if (mUseBigtable)
+ createBigTable();
+ dctbl1.initialized = true;
+ }
+
+ /************************************
+ * Bigtable creation
+ *
+ * This is expanding the concept of fast lookups
+ *
+ * A complete table for 14 arbitrary bits will be
+ * created that enables fast lookup of number of bits used,
+ * and final delta result.
+ * Hit rate is about 90-99% for typical LJPEGS, usually about 98%
+ *
+ ************************************/
+
+ void createBigTable() {
+ const uint32 bits =
+ 14; // HuffDecode functions must be changed, if this is modified.
+ const uint32 size = 1 << bits;
+ int rv = 0;
+ int temp;
+ uint32 l;
+
+ dctbl1.bigTable.resize(size);
+ for (uint32 i = 0; i < size; i++) {
+ ushort16 input = i << 2; // Calculate input value
+ int code = input >> 8; // Get 8 bits
+ uint32 val = dctbl1.numbits[code];
+ l = val & 15;
+ if (l) {
+ rv = val >> 4;
+ } else {
+ l = 8;
+ while (code > dctbl1.maxcode[l]) {
+ temp = input >> (15 - l) & 1;
+ code = (code << 1) | temp;
+ l++;
+ }
+
+ /*
+ * With garbage input we may reach the sentinel value l = 17.
+ */
+
+ if (l > 16 || dctbl1.valptr[l] == 0xff) {
+ dctbl1.bigTable[i] = 0xff;
+ continue;
+ }
+ rv = dctbl1.huffval[dctbl1.valptr[l] + (code - dctbl1.mincode[l])];
+ }
+
+ if (rv == 16) {
+ if (mDNGCompatible)
+ dctbl1.bigTable[i] = (-(32768 << 8)) | (16 + l);
+ else
+ dctbl1.bigTable[i] = (-(32768 << 8)) | l;
+ continue;
+ }
+
+ if (rv + l > bits) {
+ dctbl1.bigTable[i] = 0xff;
+ continue;
+ }
+
+ if (rv) {
+ int x = input >> (16 - l - rv) & ((1 << rv) - 1);
+ if ((x & (1 << (rv - 1))) == 0)
+ x -= (1 << rv) - 1;
+ dctbl1.bigTable[i] =
+ static_cast<int>((static_cast<unsigned>(x) << 8) | (l + rv));
+ } else {
+ dctbl1.bigTable[i] = l;
+ }
+ }
+ }
+
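The comment above describes the point of bigTable: every possible 14-bit lookahead window is precomputed so that, in the common case, a decode costs one table read. A hedged sketch of consuming such an entry, assuming the same layout that decodeNext() below relies on (low byte = bits consumed, upper bits = decoded value, 0xff meaning "not resolvable in 14 bits"); this is an illustration, not the exact rawspeed code:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Returns {bits consumed, decoded value}, or {-1, 0} if the caller must
    // fall back to the bit-by-bit Huffman walk.
    std::pair<int, int> lookup14(uint32_t next14Bits,
                                 const std::vector<int>& bigTable) {
      const int entry = bigTable[next14Bits & 0x3FFF]; // 14-bit index
      if ((entry & 0xff) == 0xff)
        return {-1, 0};
      return {entry & 0xff, entry >> 8};
    }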
+public:
+ uint32 setNCodesPerLength(const Buffer& data) {
+ uint32 acc = 0;
+ for (uint32 i = 0; i < 16; i++) {
+ dctbl1.bits[i + 1] = data[i];
+ acc += dctbl1.bits[i + 1];
+ }
+ dctbl1.bits[0] = 0;
+ return acc;
+ }
+
+ void setCodeValues(const Buffer& data) {
+ for (uint32 i = 0; i < data.getSize(); i++)
+ dctbl1.huffval[i] = data[i];
+ }
+
+ void setup(bool fullDecode_, bool fixDNGBug16_) { createHuffmanTable(); }
+
+ /*
+ *--------------------------------------------------------------
+ *
+ * HuffDecode --
+ *
+ * Taken from Figure F.16: extract next coded symbol from
+ * input stream. This should become a macro.
+ *
+ * Results:
+ * Next coded symbol
+ *
+ * Side effects:
+ * Bitstream is parsed.
+ *
+ *--------------------------------------------------------------
+ */
+ int decodeNext(BitPumpMSB& bits) { // NOLINT: google-runtime-references
+ int rv;
+ int l;
+ int temp;
+ int code;
+ unsigned val;
+
+ bits.fill();
+ code = bits.peekBitsNoFill(14);
+ val = static_cast<unsigned>(dctbl1.bigTable[code]);
+ if ((val & 0xff) != 0xff) {
+ bits.skipBitsNoFill(val & 0xff);
+ return static_cast<int>(val) >> 8;
+ }
+ rv = 0;
+ code = bits.peekBitsNoFill(8);
+ val = dctbl1.numbits[code];
+ l = val & 15;
+ if (l) {
+ bits.skipBitsNoFill(l);
+ rv = static_cast<int>(val) >> 4;
+ } else {
+ bits.skipBits(8);
+ l = 8;
+ while (code > dctbl1.maxcode[l]) {
+ temp = bits.getBitsNoFill(1);
+ code = (code << 1) | temp;
+ l++;
+ }
+
+ if (l > 16) {
+ ThrowRDE("Corrupt JPEG data: bad Huffman code:%u\n", l);
+ } else {
+ rv = dctbl1.huffval[dctbl1.valptr[l] + (code - dctbl1.mincode[l])];
+ }
+ }
+
+ if (rv == 16)
+ return -32768;
+
+ /*
+ * Section F.2.2.1: decode the difference and
+ * Figure F.12: extend sign bit
+ */
+ uint32 len = rv & 15;
+ uint32 shl = rv >> 4;
+ int diff = ((bits.getBits(len - shl) << 1) + 1) << shl >> 1;
+ if ((diff & (1 << (len - 1))) == 0)
+ diff -= (1 << len) - !shl;
+ return diff;
+ }
};
+} // namespace
+
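decodeNext() above ends with the classic lossless-JPEG "extend" step (Section F.2.2.1 / Figure F.12): a difference read as len raw bits is negative when its top bit is clear. A small self-contained illustration of that rule, ignoring the shl shortcut used in the code above:

    // Turn a raw 'len'-bit value from the bit stream into a signed difference.
    int extendSign(int value, int len) {
      if (len == 0)
        return 0;
      if ((value & (1 << (len - 1))) == 0)
        value -= (1 << len) - 1; // top bit clear -> the difference is negative
      return value;
    }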
std::vector<ushort16> NikonDecompressor::createCurve(ByteStream* metadata,
uint32 bitsPS, uint32 v0,
uint32 v1, uint32* split) {
+ // Nikon Z7 12/14 bit compressed hack.
+ if (v0 == 68 && v1 == 64)
+ bitsPS -= 2;
+
 // 'curve' will hold a piece-wise linearly interpolated function.
// there are 'csize' segments, each is 'step' values long.
// the very last value is not part of the used table but necessary
@@ -73,7 +381,7 @@ std::vector<ushort16> NikonDecompressor::createCurve(ByteStream* metadata,
if (csize > 1)
step = curve.size() / (csize - 1);
- if (v0 == 68 && v1 == 32 && step > 0) {
+ if (v0 == 68 && (v1 == 32 || v1 == 64) && step > 0) {
if ((csize - 1) * step != curve.size() - 1)
ThrowRDE("Bad curve segment count (%u)", csize);
@@ -114,15 +422,18 @@ std::vector<ushort16> NikonDecompressor::createCurve(ByteStream* metadata,
return curve;
}
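createCurve() above builds a piece-wise linear lookup table from control points stored in the maker-note metadata; each of the csize - 1 segments covers step output entries. A hedged standalone sketch of that interpolation (makeCurve and its parameters are illustrative; the real routine also contains the version-specific handling visible in the hunks):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // 'points' holds the csize control points; every segment spans 'step' entries.
    std::vector<uint16_t> makeCurve(const std::vector<uint16_t>& points, size_t step) {
      const size_t csize = points.size();
      assert(csize > 1 && step > 0);
      std::vector<uint16_t> curve((csize - 1) * step + 1);
      for (size_t i = 0; i + 1 < csize; ++i) {
        const long long lo = points[i];
        const long long hi = points[i + 1];
        for (size_t j = 0; j < step; ++j) // linear blend inside the segment
          curve[i * step + j] = static_cast<uint16_t>(
              lo + (hi - lo) * static_cast<long long>(j) /
                       static_cast<long long>(step));
      }
      curve.back() = points.back(); // the extra last value caps the table
      return curve;
    }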
-HuffmanTable NikonDecompressor::createHuffmanTable(uint32 huffSelect) {
- HuffmanTable ht;
- uint32 count = ht.setNCodesPerLength(Buffer(nikon_tree[huffSelect][0], 16));
- ht.setCodeValues(Buffer(nikon_tree[huffSelect][1], count));
+template <typename Huffman>
+Huffman NikonDecompressor::createHuffmanTable(uint32 huffSelect) {
+ Huffman ht;
+ uint32 count =
+ ht.setNCodesPerLength(Buffer(nikon_tree[huffSelect][0].data(), 16));
+ ht.setCodeValues(Buffer(nikon_tree[huffSelect][1].data(), count));
ht.setup(true, false);
return ht;
}
-NikonDecompressor::NikonDecompressor(const RawImage& raw, uint32 bitsPS_)
+NikonDecompressor::NikonDecompressor(const RawImage& raw, ByteStream metadata,
+ uint32 bitsPS_)
: mRaw(raw), bitsPS(bitsPS_) {
if (mRaw->getCpp() != 1 || mRaw->getDataType() != TYPE_USHORT16 ||
mRaw->getBpp() != 2)
@@ -140,18 +451,9 @@ NikonDecompressor::NikonDecompressor(const RawImage& raw, uint32 bitsPS_)
default:
ThrowRDE("Invalid bpp found: %u", bitsPS);
}
-}
-
-void NikonDecompressor::decompress(ByteStream metadata, const ByteStream& data,
- bool uncorrectedRawValues) {
- const iPoint2D& size = mRaw->dim;
uint32 v0 = metadata.getByte();
uint32 v1 = metadata.getByte();
- uint32 huffSelect = 0;
- uint32 split = 0;
- int pUp1[2];
- int pUp2[2];
writeLog(DEBUG_PRIO_EXTRA, "Nef version v0:%u, v1:%u", v0, v1);
@@ -168,33 +470,36 @@ void NikonDecompressor::decompress(ByteStream metadata, const ByteStream& data,
pUp2[0] = metadata.getU16();
pUp2[1] = metadata.getU16();
- HuffmanTable ht = createHuffmanTable(huffSelect);
+ curve = createCurve(&metadata, bitsPS, v0, v1, &split);
- auto curve = createCurve(&metadata, bitsPS, v0, v1, &split);
- RawImageCurveGuard curveHandler(&mRaw, curve, uncorrectedRawValues);
+ // If the 'split' happens outside of the image, it does not actually happen.
+ if (split >= static_cast<unsigned>(mRaw->dim.y))
+ split = 0;
+}
+
+template <typename Huffman>
+void NikonDecompressor::decompress(BitPumpMSB* bits, int start_y, int end_y) {
+ Huffman ht = createHuffmanTable<Huffman>(huffSelect);
- BitPumpMSB bits(data);
uchar8* draw = mRaw->getData();
uint32 pitch = mRaw->pitch;
int pLeft1 = 0;
int pLeft2 = 0;
- uint32 random = bits.peekBits(24);
- //allow gcc to devirtualize the calls below
+
+ // allow gcc to devirtualize the calls below
auto* rawdata = reinterpret_cast<RawImageDataU16*>(mRaw.get());
+ const iPoint2D& size = mRaw->dim;
assert(size.x % 2 == 0);
assert(size.x >= 2);
- for (uint32 y = 0; y < static_cast<unsigned>(size.y); y++) {
- if (split && y == split) {
- ht = createHuffmanTable(huffSelect + 1);
- }
+ for (uint32 y = start_y; y < static_cast<uint32>(end_y); y++) {
auto* dest =
reinterpret_cast<ushort16*>(&draw[y * pitch]); // Adjust destination
- pUp1[y&1] += ht.decodeNext(bits);
- pUp2[y&1] += ht.decodeNext(bits);
- pLeft1 = pUp1[y&1];
- pLeft2 = pUp2[y&1];
+ pUp1[y & 1] += ht.decodeNext(*bits);
+ pUp2[y & 1] += ht.decodeNext(*bits);
+ pLeft1 = pUp1[y & 1];
+ pLeft2 = pUp2[y & 1];
rawdata->setWithLookUp(clampBits(pLeft1, 15),
reinterpret_cast<uchar8*>(dest + 0), &random);
@@ -204,8 +509,8 @@ void NikonDecompressor::decompress(ByteStream metadata, const ByteStream& data,
dest += 2;
for (uint32 x = 2; x < static_cast<uint32>(size.x); x += 2) {
- pLeft1 += ht.decodeNext(bits);
- pLeft2 += ht.decodeNext(bits);
+ pLeft1 += ht.decodeNext(*bits);
+ pLeft2 += ht.decodeNext(*bits);
rawdata->setWithLookUp(clampBits(pLeft1, 15),
reinterpret_cast<uchar8*>(dest + 0), &random);
@@ -217,4 +522,23 @@ void NikonDecompressor::decompress(ByteStream metadata, const ByteStream& data,
}
}
+void NikonDecompressor::decompress(const ByteStream& data,
+ bool uncorrectedRawValues) {
+ RawImageCurveGuard curveHandler(&mRaw, curve, uncorrectedRawValues);
+
+ BitPumpMSB bits(data);
+
+ random = bits.peekBits(24);
+
+ assert(split == 0 || split < static_cast<unsigned>(mRaw->dim.y));
+
+ if (!split) {
+ decompress<HuffmanTable>(&bits, 0, mRaw->dim.y);
+ } else {
+ decompress<HuffmanTable>(&bits, 0, split);
+ huffSelect += 1;
+ decompress<NikonLASDecompressor>(&bits, split, mRaw->dim.y);
+ }
+}
+
} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/NikonDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/NikonDecompressor.h
index 6fde227a5..cfa1bb0e5 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/NikonDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/NikonDecompressor.h
@@ -20,36 +20,45 @@
#pragma once
-#include "common/Common.h" // for uint32
+#include "common/Common.h" // for uint32, ushort16
#include "common/RawImage.h" // for RawImage
#include "decompressors/AbstractDecompressor.h" // for AbstractDecompressor
+#include "io/BitPumpMSB.h" // for BitPumpMSB
#include <vector> // for vector
namespace rawspeed {
-class iPoint2D;
-
-class RawImage;
-
class ByteStream;
-class HuffmanTable;
-
class NikonDecompressor final : public AbstractDecompressor {
RawImage mRaw;
uint32 bitsPS;
+ uint32 huffSelect = 0;
+ uint32 split = 0;
+
+ std::array<int, 2> pUp1;
+ std::array<int, 2> pUp2;
+
+ std::vector<ushort16> curve;
+
+ uint32 random;
+
public:
- NikonDecompressor(const RawImage& raw, uint32 bitsPS);
+ NikonDecompressor(const RawImage& raw, ByteStream metadata, uint32 bitsPS);
- void decompress(ByteStream metadata, const ByteStream& data,
- bool uncorrectedRawValues);
+ void decompress(const ByteStream& data, bool uncorrectedRawValues);
private:
- static const uchar8 nikon_tree[][2][16];
+ static const std::array<std::array<std::array<uchar8, 16>, 2>, 6> nikon_tree;
static std::vector<ushort16> createCurve(ByteStream* metadata, uint32 bitsPS,
uint32 v0, uint32 v1, uint32* split);
- static HuffmanTable createHuffmanTable(uint32 huffSelect);
+
+ template <typename Huffman>
+ void decompress(BitPumpMSB* bits, int start_y, int end_y);
+
+ template <typename Huffman>
+ static Huffman createHuffmanTable(uint32 huffSelect);
};
} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/OlympusDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/OlympusDecompressor.cpp
index 3a8cc3399..5f6ceb680 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/OlympusDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/OlympusDecompressor.cpp
@@ -21,20 +21,19 @@
*/
#include "decompressors/OlympusDecompressor.h"
-#include "common/Common.h" // for uchar8
-#include "common/Point.h" // for iPoint2D
-#include "common/RawImage.h" // for RawImage
-#include "decoders/RawDecoderException.h" // for ThrowRDE
-#include "decompressors/AbstractDecompressor.h" // for RawDecom...
-#include "decompressors/HuffmanTable.h" // for HuffmanTable
-#include "io/BitPumpMSB.h" // for BitPumpMSB
-#include <algorithm> // for move
-#include <algorithm> // for min
-#include <array> // for array
-#include <cmath> // for signbit
-#include <cstdlib> // for abs
-#include <memory> // for unique_ptr
-#include <type_traits> // for enable_if, is_integer
+#include "common/Common.h" // for uint32, ushort16, uchar8
+#include "common/Point.h" // for iPoint2D
+#include "common/RawImage.h" // for RawImage, RawImageData
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "io/BitPumpMSB.h" // for BitPumpMSB
+#include "io/ByteStream.h" // for ByteStream
+#include <algorithm> // for min
+#include <array> // for array, array<>::value_type
+#include <cassert> // for assert
+#include <cmath> // for abs
+#include <cstdlib> // for abs
+#include <memory> // for unique_ptr
+#include <type_traits> // for enable_if_t, is_integral
namespace {
@@ -90,7 +89,7 @@ void OlympusDecompressor::decompress(ByteStream input) const {
int pitch = mRaw->pitch;
/* Build a table to quickly look up "high" value */
- std::unique_ptr<char[]> bittable(new char[4096]);
+ std::unique_ptr<char[]> bittable(new char[4096]); // NOLINT
for (i = 0; i < 4096; i++) {
int b = i;
@@ -146,9 +145,10 @@ void OlympusDecompressor::decompress(ByteStream input) const {
nw[c] = pred;
}
}
- dest[x] = pred + ((diff * 4) | low);
// Set predictor
- left[c] = dest[x];
+ left[c] = pred + ((diff * 4) | low);
+ // Set the pixel
+ dest[x] = left[c];
} else {
// Have local variables for values used several tiles
// (having a "ushort16 *dst_up" that caches dest[-pitch+((int)x)] is
@@ -166,10 +166,11 @@ void OlympusDecompressor::decompress(ByteStream input) const {
} else
pred = std::abs(leftMinusNw) > std::abs(upMinusNw) ? left[c] : up;
- dest[x] = pred + ((diff * 4) | low);
// Set predictors
- left[c] = dest[x];
+ left[c] = pred + ((diff * 4) | low);
nw[c] = up;
+ // Set the pixel
+ dest[x] = left[c];
}
// ODD PIXELS
@@ -208,7 +209,10 @@ void OlympusDecompressor::decompress(ByteStream input) const {
nw[c] = pred;
}
}
- dest[x] = left[c] = pred + ((diff * 4) | low);
+ // Set predictor
+ left[c] = pred + ((diff * 4) | low);
+ // Set the pixel
+ dest[x] = left[c];
} else {
int up = dest[-pitch + (static_cast<int>(x))];
int leftMinusNw = left[c] - nw[c];
@@ -224,8 +228,11 @@ void OlympusDecompressor::decompress(ByteStream input) const {
} else
pred = std::abs(leftMinusNw) > std::abs(upMinusNw) ? left[c] : up;
- dest[x] = left[c] = pred + ((diff * 4) | low);
+ // Set predictors
+ left[c] = pred + ((diff * 4) | low);
nw[c] = up;
+ // Set the pixel
+ dest[x] = left[c];
}
border = y_border;
}
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/OlympusDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/OlympusDecompressor.h
index bb9ebc1b6..c52561394 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/OlympusDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/OlympusDecompressor.h
@@ -22,11 +22,10 @@
#include "common/RawImage.h" // for RawImage
#include "decompressors/AbstractDecompressor.h" // for AbstractDecompressor
-#include "io/ByteStream.h" // for ByteStream
namespace rawspeed {
-class RawImage;
+class ByteStream;
class OlympusDecompressor final : public AbstractDecompressor {
RawImage mRaw;
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressor.cpp
index c7c982f13..a1dd4cd34 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressor.cpp
@@ -3,7 +3,7 @@
Copyright (C) 2009-2014 Klaus Post
Copyright (C) 2014 Pedro Côrte-Real
- Copyright (C) 2017 Roman Lebedev
+ Copyright (C) 2017-2018 Roman Lebedev
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -21,153 +21,230 @@
*/
#include "decompressors/PanasonicDecompressor.h"
-#include "common/Mutex.h" // for MutexLocker
-#include "common/Point.h" // for iPoint2D
-#include "common/RawImage.h" // for RawImage, RawImageData
-#include <algorithm> // for min, move
-#include <cstring> // for memcpy
-#include <vector> // for vector
+#include "common/Mutex.h" // for MutexLocker
+#include "common/Point.h" // for iPoint2D
+#include "common/RawImage.h" // for RawImage, RawImageData
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "io/Buffer.h" // for Buffer, Buffer::size_type
+#include <algorithm> // for generate_n, min
+#include <array> // for array
+#include <cassert> // for assert
+#include <cstddef> // for size_t
+#include <iterator> // for back_insert_iterator, back...
+#include <limits> // for numeric_limits
+#include <memory> // for allocator_traits<>::value_...
+#include <utility> // for move
+#include <vector> // for vector
namespace rawspeed {
+constexpr uint32 PanasonicDecompressor::BlockSize;
+
PanasonicDecompressor::PanasonicDecompressor(const RawImage& img,
const ByteStream& input_,
bool zero_is_not_bad,
- uint32 load_flags_)
+ uint32 section_split_offset_)
: AbstractParallelizedDecompressor(img), zero_is_bad(!zero_is_not_bad),
- load_flags(load_flags_) {
+ section_split_offset(section_split_offset_) {
if (mRaw->getCpp() != 1 || mRaw->getDataType() != TYPE_USHORT16 ||
mRaw->getBpp() != 2)
ThrowRDE("Unexpected component count / data type");
- if (!mRaw->dim.hasPositiveArea() || mRaw->dim.x % 14 != 0) {
+ if (!mRaw->dim.hasPositiveArea() || mRaw->dim.x % PixelsPerPacket != 0) {
ThrowRDE("Unexpected image dimensions found: (%i; %i)", mRaw->dim.x,
mRaw->dim.y);
}
- /*
- * Normally, we would check the image dimensions against some hardcoded
- * threshold. That is being done as poor man's attempt to catch
- * obviously-invalid raws, and avoid OOM's during fuzzing. However, there is
- * a better solution - actually check the size of input buffer to try and
- * guess whether the image size is valid or not. And in this case, we can do
- * that, because the compression rate is static and known.
- */
- // if (width > 5488 || height > 3912)
- // ThrowRDE("Too large image size: (%u; %u)", width, height);
-
- if (BufSize < load_flags)
- ThrowRDE("Bad load_flags: %u, less than BufSize (%u)", load_flags, BufSize);
+ if (BlockSize < section_split_offset)
+ ThrowRDE("Bad section_split_offset: %u, less than BlockSize (%u)",
+ section_split_offset, BlockSize);
// Naive count of bytes that given pixel count requires.
- // Do division first, because we know the remainder is always zero,
- // and the next multiplication won't overflow.
- assert(mRaw->dim.area() % 7ULL == 0ULL);
- const auto rawBytesNormal = (mRaw->dim.area() / 7ULL) * 8ULL;
- // If load_flags is zero, than that size is the size we need to read.
- // But if it is not, then we need to round up to multiple of BufSize, because
- // of splitting&rotation of each BufSize's slice in half at load_flags bytes.
+ assert(mRaw->dim.area() % PixelsPerPacket == 0);
+ const auto bytesTotal = (mRaw->dim.area() / PixelsPerPacket) * BytesPerPacket;
+ assert(bytesTotal > 0);
+
+ // If section_split_offset is zero, then that is the number of bytes we need
+ // to read. But if it is not, then we need to round up to a multiple of
+ // BlockSize, because of the splitting & rotation of each BlockSize slice in
+ // half at section_split_offset bytes.
const auto bufSize =
- load_flags == 0 ? rawBytesNormal : roundUp(rawBytesNormal, BufSize);
+ section_split_offset == 0 ? bytesTotal : roundUp(bytesTotal, BlockSize);
+
+ if (bufSize > std::numeric_limits<ByteStream::size_type>::max())
+ ThrowRDE("Raw dimensions require input buffer larger than supported");
+
input = input_.peekStream(bufSize);
+
+ chopInputIntoBlocks();
}
-struct PanasonicDecompressor::PanaBitpump {
- ByteStream input;
+void PanasonicDecompressor::chopInputIntoBlocks() {
+ auto pixelToCoordinate = [width = mRaw->dim.x](unsigned pixel) -> iPoint2D {
+ return iPoint2D(pixel % width, pixel / width);
+ };
+
+ // If section_split_offset == 0, last block may not be full.
+ const auto blocksTotal = roundUpDivision(input.getRemainSize(), BlockSize);
+ assert(blocksTotal > 0);
+ assert(blocksTotal * PixelsPerBlock >= mRaw->dim.area());
+ blocks.reserve(blocksTotal);
+
+ unsigned currPixel = 0;
+ std::generate_n(std::back_inserter(blocks), blocksTotal,
+ [input = &input, &currPixel, pixelToCoordinate]() -> Block {
+ assert(input->getRemainSize() != 0);
+ const auto blockSize =
+ std::min(input->getRemainSize(), BlockSize);
+ assert(blockSize > 0);
+ assert(blockSize % BytesPerPacket == 0);
+ const auto packets = blockSize / BytesPerPacket;
+ assert(packets > 0);
+ const auto pixels = packets * PixelsPerPacket;
+ assert(pixels > 0);
+
+ ByteStream bs = input->getStream(blockSize);
+ iPoint2D beginCoord = pixelToCoordinate(currPixel);
+ currPixel += pixels;
+ iPoint2D endCoord = pixelToCoordinate(currPixel);
+ return {std::move(bs), beginCoord, endCoord};
+ });
+ assert(blocks.size() == blocksTotal);
+ assert(currPixel >= mRaw->dim.area());
+ assert(input.getRemainSize() == 0);
+
+ // Clamp the end coordinate for the last block.
+ blocks.back().endCoord = mRaw->dim;
+ blocks.back().endCoord.y -= 1;
+}
+
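For reference, the arithmetic behind the pixel accounting in chopInputIntoBlocks(), using the constants defined in PanasonicDecompressor.h further below: a 16-byte packet encodes 14 pixels, so one 0x4000-byte block carries 1024 packets, i.e. 14336 pixels. A compile-time check of those numbers:

    #include <cstdint>

    constexpr uint32_t BlockSize = 0x4000;
    constexpr uint32_t BytesPerPacket = 16;
    constexpr uint32_t PixelsPerPacket = 14;
    constexpr uint32_t PacketsPerBlock = BlockSize / BytesPerPacket;       // 1024
    constexpr uint32_t PixelsPerBlock = PixelsPerPacket * PacketsPerBlock; // 14336
    static_assert(PacketsPerBlock == 1024, "one block holds 1024 packets");
    static_assert(PixelsPerBlock == 14336, "one block decodes to 14336 pixels");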
+class PanasonicDecompressor::ProxyStream {
+ ByteStream block;
+ const uint32 section_split_offset;
std::vector<uchar8> buf;
+
int vbits = 0;
- uint32 load_flags;
- PanaBitpump(ByteStream input_, int load_flags_)
- : input(std::move(input_)), load_flags(load_flags_) {
+ void parseBlock() {
+ assert(buf.empty());
+ assert(block.getRemainSize() <= BlockSize);
+ assert(section_split_offset <= BlockSize);
+
+ Buffer FirstSection = block.getBuffer(section_split_offset);
+ Buffer SecondSection = block.getBuffer(block.getRemainSize());
+
+ // get one more byte, so the return statement of getBits does not have
+ // to special case for accessing the last byte
+ buf.reserve(BlockSize + 1UL);
+
+ // First copy the second section. This makes it the first section.
+ buf.insert(buf.end(), SecondSection.begin(), SecondSection.end());
+ // Now append the original 1st section right after the new 1st section.
+ buf.insert(buf.end(), FirstSection.begin(), FirstSection.end());
+
+ assert(block.getRemainSize() == 0);
+
// get one more byte, so the return statement of getBits does not have
// to special case for accessing the last byte
- buf.resize(BufSize + 1UL);
+ buf.emplace_back(0);
}
- void skipBytes(int bytes) {
- int blocks = (bytes / BufSize) * BufSize;
- input.skipBytes(blocks);
- for (int i = blocks; i < bytes; i++)
- (void)getBits(8);
+public:
+ ProxyStream(ByteStream block_, int section_split_offset_)
+ : block(std::move(block_)), section_split_offset(section_split_offset_) {
+ parseBlock();
}
uint32 getBits(int nbits) {
- if (!vbits) {
- /* On truncated files this routine will just return just for the truncated
- * part of the file. Since there is no chance of affecting output buffer
- * size we allow the decoder to decode this
- */
- assert(BufSize >= load_flags);
- auto size = std::min(input.getRemainSize(), BufSize - load_flags);
- memcpy(buf.data() + load_flags, input.getData(size), size);
-
- size = std::min(input.getRemainSize(), load_flags);
- if (size != 0)
- memcpy(buf.data(), input.getData(size), size);
- }
vbits = (vbits - nbits) & 0x1ffff;
int byte = vbits >> 3 ^ 0x3ff0;
return (buf[byte] | buf[byte + 1UL] << 8) >> (vbits & 7) & ~(-(1 << nbits));
}
};
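parseBlock() above undoes the on-disk layout of a block: the two sections are stored swapped, so the second section has to be copied first to put the pixels back in linear order, and one spare byte is appended so getBits() never reads past the end of the buffer. A hedged standalone sketch of that reordering (unsplitBlock is an illustrative name, not a rawspeed function):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> unsplitBlock(const std::vector<uint8_t>& block,
                                      size_t splitOffset) {
      std::vector<uint8_t> out;
      out.reserve(block.size() + 1);
      // The second section actually comes first in pixel order.
      out.insert(out.end(), block.begin() + splitOffset, block.end());
      // Then the original first section follows it.
      out.insert(out.end(), block.begin(), block.begin() + splitOffset);
      out.push_back(0); // spare byte for the bit reader's final access
      return out;
    }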
-void PanasonicDecompressor::decompressThreaded(
- const RawDecompressorThread* t) const {
- PanaBitpump bits(input, load_flags);
+void PanasonicDecompressor::processPixelPacket(
+ ProxyStream* bits, int y, ushort16* dest, int xbegin,
+ std::vector<uint32>* zero_pos) const {
+ int sh = 0;
- /* 9 + 1/7 bits per pixel */
- bits.skipBytes(8 * mRaw->dim.x * t->start / 7);
+ std::array<int, 2> pred;
+ pred.fill(0);
- assert(mRaw->dim.x % 14 == 0);
- const auto blocks = mRaw->dim.x / 14;
+ std::array<int, 2> nonz;
+ nonz.fill(0);
- std::vector<uint32> zero_pos;
- for (uint32 y = t->start; y < t->end; y++) {
- int sh = 0;
-
- auto* dest = reinterpret_cast<ushort16*>(mRaw->getData(0, y));
- for (int block = 0; block < blocks; block++) {
- std::array<int, 2> pred;
- pred.fill(0);
-
- std::array<int, 2> nonz;
- nonz.fill(0);
-
- int u = 0;
-
- for (int x = 0; x < 14; x++) {
- const int c = x & 1;
-
- if (u == 2) {
- sh = 4 >> (3 - bits.getBits(2));
- u = -1;
- }
-
- if (nonz[c]) {
- int j = bits.getBits(8);
- if (j) {
- pred[c] -= 0x80 << sh;
- if (pred[c] < 0 || sh == 4)
- pred[c] &= ~(-(1 << sh));
- pred[c] += j << sh;
- }
- } else {
- nonz[c] = bits.getBits(8);
- if (nonz[c] || x > 11)
- pred[c] = nonz[c] << 4 | bits.getBits(4);
- }
-
- *dest = pred[c];
-
- if (zero_is_bad && 0 == pred[c])
- zero_pos.push_back((y << 16) | (14 * block + x));
-
- u++;
- dest++;
+ int u = 0;
+
+ for (int p = 0; p < PixelsPerPacket; p++) {
+ const int c = p & 1;
+
+ if (u == 2) {
+ sh = 4 >> (3 - bits->getBits(2));
+ u = -1;
+ }
+
+ if (nonz[c]) {
+ int j = bits->getBits(8);
+ if (j) {
+ pred[c] -= 0x80 << sh;
+ if (pred[c] < 0 || sh == 4)
+ pred[c] &= ~(-(1 << sh));
+ pred[c] += j << sh;
}
+ } else {
+ nonz[c] = bits->getBits(8);
+ if (nonz[c] || p > 11)
+ pred[c] = nonz[c] << 4 | bits->getBits(4);
+ }
+
+ *dest = pred[c];
+
+ if (zero_is_bad && 0 == pred[c])
+ zero_pos->push_back((y << 16) | (xbegin + p));
+
+ u++;
+ dest++;
+ }
+}
+
+void PanasonicDecompressor::processBlock(const Block& block,
+ std::vector<uint32>* zero_pos) const {
+ ProxyStream bits(block.bs, section_split_offset);
+
+ for (int y = block.beginCoord.y; y <= block.endCoord.y; y++) {
+ int x = 0;
+ // First row may not begin at the first column.
+ if (block.beginCoord.y == y)
+ x = block.beginCoord.x;
+
+ int endx = mRaw->dim.x;
+ // Last row may end before the last column.
+ if (block.endCoord.y == y)
+ endx = block.endCoord.x;
+
+ auto* dest = reinterpret_cast<ushort16*>(mRaw->getData(x, y));
+
+ assert(x % PixelsPerPacket == 0);
+ assert(endx % PixelsPerPacket == 0);
+
+ for (; x < endx;) {
+ processPixelPacket(&bits, y, dest, x, zero_pos);
+
+ x += PixelsPerPacket;
+ dest += PixelsPerPacket;
}
}
+}
+
+void PanasonicDecompressor::decompressThreaded(
+ const RawDecompressorThread* t) const {
+ std::vector<uint32> zero_pos;
+
+ assert(!blocks.empty());
+ assert(t->start < t->end);
+ assert(t->end <= blocks.size());
+ for (size_t i = t->start; i < t->end; i++)
+ processBlock(blocks[i], &zero_pos);
if (zero_is_bad && !zero_pos.empty()) {
MutexLocker guard(&mRaw->mBadPixelMutex);
@@ -176,4 +253,9 @@ void PanasonicDecompressor::decompressThreaded(
}
}
+void PanasonicDecompressor::decompress() const {
+ assert(!blocks.empty());
+ startThreading(blocks.size());
+}
+
} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressor.h
index 27480d489..68ef1f020 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressor.h
@@ -1,7 +1,7 @@
/*
RawSpeed - RAW file decoder.
- Copyright (C) 2017 Roman Lebedev
+ Copyright (C) 2017-2018 Roman Lebedev
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -21,33 +21,70 @@
#pragma once
#include "common/Common.h" // for uint32
+#include "common/Point.h" // for iPoint2D
#include "decompressors/AbstractParallelizedDecompressor.h" // for Abstract...
#include "io/ByteStream.h" // for ByteStream
+#include <utility> // for move
+#include <vector> // for vector
namespace rawspeed {
class RawImage;
class PanasonicDecompressor final : public AbstractParallelizedDecompressor {
- static constexpr uint32 BufSize = 0x4000;
- struct PanaBitpump;
+ static constexpr uint32 BlockSize = 0x4000;
- void decompressThreaded(const RawDecompressorThread* t) const final;
+ static constexpr int PixelsPerPacket = 14;
+
+ static constexpr uint32 BytesPerPacket = 16;
+
+ static constexpr uint32 PacketsPerBlock = BlockSize / BytesPerPacket;
+
+ static constexpr uint32 PixelsPerBlock = PixelsPerPacket * PacketsPerBlock;
+
+ class ProxyStream;
ByteStream input;
bool zero_is_bad;
// The RW2 raw image buffer is split into sections of BufSize bytes.
- // If load_flags is 0, then last section is not nessesairly full.
- // If load_flags is not 0, then each section has two parts:
- // bytes: [0..load_flags-1][load_flags..BufSize-1]
+ // If section_split_offset is 0, then the last section is not necessarily
+ // full. If section_split_offset is not 0, then each section has two parts:
+ // bytes: [0..section_split_offset-1][section_split_offset..BufSize-1]
// pixels: [a..b][0..a-1]
// I.e. these two parts need to be swapped around.
- uint32 load_flags;
+ uint32 section_split_offset;
+
+ struct Block {
+ ByteStream bs;
+ iPoint2D beginCoord;
+ // The rectangle is an incorrect representation. All the rows
+ // between the first and last one span the entire width of the image.
+ iPoint2D endCoord;
+
+ Block() = default;
+ Block(ByteStream&& bs_, iPoint2D beginCoord_, iPoint2D endCoord_)
+ : bs(std::move(bs_)), beginCoord(beginCoord_), endCoord(endCoord_) {}
+ };
+
+ // If really wanted, this vector could be avoided,
+ // and each Block computed on-the-fly
+ std::vector<Block> blocks;
+
+ void chopInputIntoBlocks();
+
+ void processPixelPacket(ProxyStream* bits, int y, ushort16* dest, int xbegin,
+ std::vector<uint32>* zero_pos) const;
+
+ void processBlock(const Block& block, std::vector<uint32>* zero_pos) const;
+
+ void decompressThreaded(const RawDecompressorThread* t) const final;
public:
PanasonicDecompressor(const RawImage& img, const ByteStream& input_,
- bool zero_is_not_bad, uint32 load_flags_);
+ bool zero_is_not_bad, uint32 section_split_offset_);
+
+ void decompress() const final;
};
} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressorV5.cpp b/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressorV5.cpp
new file mode 100644
index 000000000..a1518a545
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressorV5.cpp
@@ -0,0 +1,255 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2018 Alexey Danilchenko
+ Copyright (C) 2018 Stefan Hoffmeister
+ Copyright (C) 2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include "decompressors/PanasonicDecompressorV5.h"
+#include "common/Point.h" // for iPoint2D
+#include "common/RawImage.h" // for RawImage, RawImageData
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "io/BitPumpLSB.h" // for BitPumpLSB
+#include "io/Buffer.h" // for Buffer, DataBuffer
+#include <algorithm> // for generate_n
+#include <cassert> // for assert
+#include <iterator> // for back_insert_iterator, back...
+#include <memory> // for allocator_traits<>::value_...
+#include <utility> // for move
+#include <vector> // for vector
+
+namespace rawspeed {
+
+struct PanasonicDecompressorV5::PacketDsc {
+ int bps;
+ int pixelsPerPacket;
+
+ constexpr PacketDsc();
+ explicit constexpr PacketDsc(int bps_)
+ : bps(bps_),
+ pixelsPerPacket(PanasonicDecompressorV5::bitsPerPacket / bps) {
+ // NOTE: the division is truncating. There may be some padding bits left.
+ }
+};
+
+constexpr PanasonicDecompressorV5::PacketDsc
+ PanasonicDecompressorV5::TwelveBitPacket =
+ PanasonicDecompressorV5::PacketDsc(/*bps=*/12);
+constexpr PanasonicDecompressorV5::PacketDsc
+ PanasonicDecompressorV5::FourteenBitPacket =
+ PanasonicDecompressorV5::PacketDsc(/*bps=*/14);
+
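The truncating division inside PacketDsc means each 128-bit packet carries a whole number of pixels plus a few padding bits: 10 pixels and 8 padding bits at 12 bps, 9 pixels and 2 padding bits at 14 bps; the padding is what processPixelPacket() later drops with skipBitsNoFill(). A compile-time check of that arithmetic:

    constexpr int bitsPerPacket = 8 * 16;             // 16-byte packet
    constexpr int pixelsAt12bps = bitsPerPacket / 12; // truncates to 10
    constexpr int pixelsAt14bps = bitsPerPacket / 14; // truncates to 9
    static_assert(pixelsAt12bps == 10 && bitsPerPacket - 12 * pixelsAt12bps == 8,
                  "12 bps: 10 pixels, 8 padding bits");
    static_assert(pixelsAt14bps == 9 && bitsPerPacket - 14 * pixelsAt14bps == 2,
                  "14 bps: 9 pixels, 2 padding bits");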
+PanasonicDecompressorV5::PanasonicDecompressorV5(const RawImage& img,
+ const ByteStream& input_,
+ uint32 bps_)
+ : AbstractParallelizedDecompressor(img), bps(bps_) {
+ if (mRaw->getCpp() != 1 || mRaw->getDataType() != TYPE_USHORT16 ||
+ mRaw->getBpp() != 2)
+ ThrowRDE("Unexpected component count / data type");
+
+ const PacketDsc* dsc = nullptr;
+ switch (bps) {
+ case 12:
+ dsc = &TwelveBitPacket;
+ break;
+ case 14:
+ dsc = &FourteenBitPacket;
+ break;
+ default:
+ ThrowRDE("Unsupported bps: %u", bps);
+ }
+
+ if (!mRaw->dim.hasPositiveArea() || mRaw->dim.x % dsc->pixelsPerPacket != 0) {
+ ThrowRDE("Unexpected image dimensions found: (%i; %i)", mRaw->dim.x,
+ mRaw->dim.y);
+ }
+
+ // How many pixel packets does the specified pixel count require?
+ assert(mRaw->dim.area() % dsc->pixelsPerPacket == 0);
+ const auto numPackets = mRaw->dim.area() / dsc->pixelsPerPacket;
+ assert(numPackets > 0);
+
+ // And how many blocks would that be? The last block may not be full; pad it.
+ numBlocks = roundUpDivision(numPackets, PacketsPerBlock);
+ assert(numBlocks > 0);
+
+ // How many full blocks does the input contain? This is truncating division.
+ const auto haveBlocks = input_.getRemainSize() / BlockSize;
+
+ // Does the input contain enough blocks?
+ if (haveBlocks < numBlocks)
+ ThrowRDE("Unsufficient count of input blocks for a given image");
+
+ // We only want those blocks we need, no extras.
+ input = input_.peekStream(numBlocks, BlockSize);
+
+ chopInputIntoBlocks(*dsc);
+}
+
+void PanasonicDecompressorV5::chopInputIntoBlocks(const PacketDsc& dsc) {
+ auto pixelToCoordinate = [width = mRaw->dim.x](unsigned pixel) -> iPoint2D {
+ return iPoint2D(pixel % width, pixel / width);
+ };
+
+ assert(numBlocks * BlockSize == input.getRemainSize());
+ blocks.reserve(numBlocks);
+
+ const auto pixelsPerBlock = dsc.pixelsPerPacket * PacketsPerBlock;
+ assert((numBlocks - 1U) * pixelsPerBlock < mRaw->dim.area());
+ assert(numBlocks * pixelsPerBlock >= mRaw->dim.area());
+
+ unsigned currPixel = 0;
+ std::generate_n(std::back_inserter(blocks), numBlocks,
+ [input = &input, &currPixel, pixelToCoordinate,
+ pixelsPerBlock]() -> Block {
+ ByteStream bs = input->getStream(BlockSize);
+ iPoint2D beginCoord = pixelToCoordinate(currPixel);
+ currPixel += pixelsPerBlock;
+ iPoint2D endCoord = pixelToCoordinate(currPixel);
+ return {std::move(bs), beginCoord, endCoord};
+ });
+ assert(blocks.size() == numBlocks);
+ assert(currPixel >= mRaw->dim.area());
+ assert(input.getRemainSize() == 0);
+
+ // Clamp the end coordinate for the last block.
+ blocks.back().endCoord = mRaw->dim;
+ blocks.back().endCoord.y -= 1;
+}
+
+class PanasonicDecompressorV5::ProxyStream {
+ ByteStream block;
+ std::vector<uchar8> buf;
+ ByteStream input;
+
+ void parseBlock() {
+ assert(buf.empty());
+ assert(block.getRemainSize() == BlockSize);
+
+ static_assert(BlockSize > sectionSplitOffset, "");
+
+ Buffer FirstSection = block.getBuffer(sectionSplitOffset);
+ Buffer SecondSection = block.getBuffer(block.getRemainSize());
+ assert(FirstSection.getSize() < SecondSection.getSize());
+
+ buf.reserve(BlockSize);
+
+ // First copy the second section. This makes it the first section.
+ buf.insert(buf.end(), SecondSection.begin(), SecondSection.end());
+ // Now append the original 1st section right after the new 1st section.
+ buf.insert(buf.end(), FirstSection.begin(), FirstSection.end());
+
+ assert(buf.size() == BlockSize);
+ assert(block.getRemainSize() == 0);
+
+ // And reset the clock.
+ input = ByteStream(DataBuffer(Buffer(buf.data(), buf.size())));
+ // input.setByteOrder(Endianness::big); // does not seem to matter?!
+ }
+
+public:
+ explicit ProxyStream(ByteStream block_) : block(std::move(block_)) {}
+
+ ByteStream& getStream() {
+ parseBlock();
+ return input;
+ }
+};
+
+template <const PanasonicDecompressorV5::PacketDsc& dsc>
+void PanasonicDecompressorV5::processPixelPacket(BitPumpLSB* bs,
+ ushort16* dest) const {
+ static_assert(dsc.pixelsPerPacket > 0, "dsc should be compile-time const");
+ static_assert(dsc.bps > 0 && dsc.bps <= 16, "");
+
+ assert(bs->getFillLevel() == 0);
+
+ const ushort16* const endDest = dest + dsc.pixelsPerPacket;
+ for (; dest != endDest;) {
+ bs->fill();
+ for (; bs->getFillLevel() >= dsc.bps; dest++) {
+ assert(dest != endDest);
+
+ *dest = bs->getBitsNoFill(dsc.bps);
+ }
+ }
+ bs->skipBitsNoFill(bs->getFillLevel()); // get rid of padding.
+}
+
+template <const PanasonicDecompressorV5::PacketDsc& dsc>
+void PanasonicDecompressorV5::processBlock(const Block& block) const {
+ static_assert(dsc.pixelsPerPacket > 0, "dsc should be compile-time const");
+ static_assert(BlockSize % bytesPerPacket == 0, "");
+
+ ProxyStream proxy(block.bs);
+ BitPumpLSB bs(proxy.getStream());
+
+ for (int y = block.beginCoord.y; y <= block.endCoord.y; y++) {
+ int x = 0;
+ // First row may not begin at the first column.
+ if (block.beginCoord.y == y)
+ x = block.beginCoord.x;
+
+ int endx = mRaw->dim.x;
+ // Last row may end before the last column.
+ if (block.endCoord.y == y)
+ endx = block.endCoord.x;
+
+ auto* dest = reinterpret_cast<ushort16*>(mRaw->getData(x, y));
+
+ assert(x % dsc.pixelsPerPacket == 0);
+ assert(endx % dsc.pixelsPerPacket == 0);
+
+ for (; x < endx;) {
+ processPixelPacket<dsc>(&bs, dest);
+
+ x += dsc.pixelsPerPacket;
+ dest += dsc.pixelsPerPacket;
+ }
+ }
+}
+
+template <const PanasonicDecompressorV5::PacketDsc& dsc>
+void PanasonicDecompressorV5::decompressThreadedInternal(
+ const RawDecompressorThread* t) const {
+ assert(t->start < t->end);
+ assert(t->end <= blocks.size());
+ for (size_t i = t->start; i < t->end; i++)
+ processBlock<dsc>(blocks[i]);
+}
+
+void PanasonicDecompressorV5::decompressThreaded(
+ const RawDecompressorThread* t) const {
+ switch (bps) {
+ case 12:
+ decompressThreadedInternal<TwelveBitPacket>(t);
+ break;
+ case 14:
+ decompressThreadedInternal<FourteenBitPacket>(t);
+ break;
+ default:
+ __builtin_unreachable();
+ }
+}
+
+void PanasonicDecompressorV5::decompress() const {
+ assert(blocks.size() == numBlocks);
+ startThreading(blocks.size());
+}
+
+} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressorV5.h b/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressorV5.h
new file mode 100644
index 000000000..2af005cba
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/PanasonicDecompressorV5.h
@@ -0,0 +1,109 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2018 Roman Lebedev
+ Copyright (C) 2018 Stefan Hoffmeister
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#pragma once
+
+#include "common/Common.h" // for uint32
+#include "common/Point.h" // for iPoint2D
+#include "decompressors/AbstractParallelizedDecompressor.h"
+#include "io/BitPumpLSB.h" // for BitPumpLSB
+#include "io/ByteStream.h" // for ByteStream
+#include <cstddef> // for size_t
+#include <utility> // for move
+#include <vector> // for vector
+
+namespace rawspeed {
+
+class RawImage;
+
+class PanasonicDecompressorV5 final : public AbstractParallelizedDecompressor {
+ // The RW2 raw image buffer consists of individual blocks,
+ // each one BlockSize bytes in size.
+ static constexpr uint32 BlockSize = 0x4000;
+
+ // These blocks themselves consist of two sections,
+ // split and swapped at sectionSplitOffset:
+ // bytes: [0..sectionSplitOffset-1][sectionSplitOffset..BlockSize-1]
+ // pixels: [a..b][0..a-1]
+ // When reading, these two sections need to be swapped to enable linear
+ // processing.
+ static constexpr uint32 sectionSplitOffset = 0x1FF8;
+
+ // The blocks themselves consist of packets with fixed size of bytesPerPacket,
+ // and each packet decodes to pixelsPerPacket pixels, which depends on bps.
+ static constexpr uint32 bytesPerPacket = 16;
+ static constexpr uint32 bitsPerPacket = 8 * bytesPerPacket;
+ static_assert(BlockSize % bytesPerPacket == 0, "");
+ static constexpr uint32 PacketsPerBlock = BlockSize / bytesPerPacket;
+
+ // Contains the decoding recipe for the packet.
+ struct PacketDsc;
+
+ // There are two variants. Which one is to be used depends on image's bps.
+ static const PacketDsc TwelveBitPacket;
+ static const PacketDsc FourteenBitPacket;
+
+ // Takes care of unsplitting&swapping back the block at sectionSplitOffset.
+ class ProxyStream;
+
+ // The full input buffer, containing all the blocks.
+ ByteStream input;
+
+ const uint32 bps;
+
+ size_t numBlocks;
+
+ struct Block {
+ ByteStream bs;
+ iPoint2D beginCoord;
+ // The rectangle is an incorrect representation. All the rows
+ // between the first and last one span the entire width of the image.
+ iPoint2D endCoord;
+
+ Block() = default;
+ Block(ByteStream&& bs_, iPoint2D beginCoord_, iPoint2D endCoord_)
+ : bs(std::move(bs_)), beginCoord(beginCoord_), endCoord(endCoord_) {}
+ };
+
+ // If really wanted, this vector could be avoided,
+ // and each Block computed on-the-fly
+ std::vector<Block> blocks;
+
+ void chopInputIntoBlocks(const PacketDsc& dsc);
+
+ template <const PacketDsc& dsc>
+ void processPixelPacket(BitPumpLSB* bs, ushort16* dest) const;
+
+ template <const PacketDsc& dsc> void processBlock(const Block& block) const;
+
+ template <const PacketDsc& dsc>
+ void decompressThreadedInternal(const RawDecompressorThread* t) const;
+
+ void decompressThreaded(const RawDecompressorThread* t) const final;
+
+public:
+ PanasonicDecompressorV5(const RawImage& img, const ByteStream& input_,
+ uint32 bps_);
+
+ void decompress() const final;
+};
+
+} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/PentaxDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/PentaxDecompressor.cpp
index 50fff52ef..ef4a38241 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/PentaxDecompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/PentaxDecompressor.cpp
@@ -27,20 +27,18 @@
#include "io/BitPumpMSB.h" // for BitPumpMSB, BitStream<>::f...
#include "io/Buffer.h" // for Buffer
#include "io/ByteStream.h" // for ByteStream
-#include "tiff/TiffEntry.h" // for TiffEntry, ::TIFF_UNDEFINED
-#include "tiff/TiffIFD.h" // for TiffIFD
-#include "tiff/TiffTag.h" // for TiffTag
#include <cassert> // for assert
-#include <vector> // for vector, allocator
+#include <vector> // for vector
namespace rawspeed {
// 16 entries of codes per bit length
// 13 entries of code values
-const uchar8 PentaxDecompressor::pentax_tree[][2][16] = {
- {{0, 2, 3, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0},
- {3, 4, 2, 5, 1, 6, 0, 7, 8, 9, 10, 11, 12}},
-};
+const std::array<std::array<std::array<uchar8, 16>, 2>, 1>
+ PentaxDecompressor::pentax_tree = {{
+ {{{0, 2, 3, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0},
+ {3, 4, 2, 5, 1, 6, 0, 7, 8, 9, 10, 11, 12}}},
+ }};
PentaxDecompressor::PentaxDecompressor(const RawImage& img,
ByteStream* metaData)
@@ -60,9 +58,9 @@ HuffmanTable PentaxDecompressor::SetupHuffmanTable_Legacy() {
HuffmanTable ht;
/* Initialize with legacy data */
- auto nCodes = ht.setNCodesPerLength(Buffer(pentax_tree[0][0], 16));
+ auto nCodes = ht.setNCodesPerLength(Buffer(pentax_tree[0][0].data(), 16));
assert(nCodes == 13); // see pentax_tree definition
- ht.setCodeValues(Buffer(pentax_tree[0][1], nCodes));
+ ht.setCodeValues(Buffer(pentax_tree[0][1].data(), nCodes));
return ht;
}
@@ -76,8 +74,8 @@ HuffmanTable PentaxDecompressor::SetupHuffmanTable_Modern(ByteStream stream) {
stream.skipBytes(12);
- uint32 v0[16];
- uint32 v1[16];
+ std::array<uint32, 16> v0;
+ std::array<uint32, 16> v1;
for (uint32 i = 0; i < depth; i++)
v0[i] = stream.getU16();
for (uint32 i = 0; i < depth; i++) {
@@ -90,7 +88,7 @@ HuffmanTable PentaxDecompressor::SetupHuffmanTable_Modern(ByteStream stream) {
std::vector<uchar8> nCodesPerLength;
nCodesPerLength.resize(17);
- uint32 v2[16];
+ std::array<uint32, 16> v2;
/* Calculate codes and store bitcounts */
for (uint32 c = 0; c < depth; c++) {
v2[c] = v0[c] >> (12 - v1[c]);
@@ -146,8 +144,8 @@ void PentaxDecompressor::decompress(const ByteStream& data) const {
assert(mRaw->dim.x > 0);
assert(mRaw->dim.x % 2 == 0);
- int pUp1[2] = {0, 0};
- int pUp2[2] = {0, 0};
+ std::array<int, 2> pUp1 = {{}};
+ std::array<int, 2> pUp2 = {{}};
for (int y = 0; y < mRaw->dim.y && mRaw->dim.x >= 2; y++) {
auto* dest = reinterpret_cast<ushort16*>(&draw[y * mRaw->pitch]);
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/PentaxDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/PentaxDecompressor.h
index 76c5b84b3..4cc915977 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/PentaxDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/PentaxDecompressor.h
@@ -21,8 +21,8 @@
#pragma once
-#include "common/Common.h" // for uint32
-#include "common/RawImage.h" // for RawImage, RawImageData
+#include "common/Common.h" // for uchar8
+#include "common/RawImage.h" // for RawImage
#include "decompressors/AbstractDecompressor.h" // for AbstractDecompressor
#include "decompressors/HuffmanTable.h" // for HuffmanTable
@@ -30,8 +30,6 @@ namespace rawspeed {
class ByteStream;
-class TiffIFD;
-
class PentaxDecompressor final : public AbstractDecompressor {
RawImage mRaw;
const HuffmanTable ht;
@@ -46,7 +44,7 @@ private:
static HuffmanTable SetupHuffmanTable_Modern(ByteStream stream);
static HuffmanTable SetupHuffmanTable(ByteStream* metaData);
- static const uchar8 pentax_tree[][2][16];
+ static const std::array<std::array<std::array<uchar8, 16>, 2>, 1> pentax_tree;
};
} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/PhaseOneDecompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/PhaseOneDecompressor.cpp
new file mode 100644
index 000000000..61a733793
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/PhaseOneDecompressor.cpp
@@ -0,0 +1,152 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2009-2014 Klaus Post
+ Copyright (C) 2014-2015 Pedro Côrte-Real
+ Copyright (C) 2017-2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include "decompressors/PhaseOneDecompressor.h"
+#include "common/Common.h" // for int32, uint32, ushort16
+#include "common/Point.h" // for iPoint2D
+#include "common/RawImage.h" // for RawImage, RawImageData
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "io/BitPumpMSB32.h" // for BitPumpMSB32
+#include <algorithm> // for for_each
+#include <array> // for array
+#include <cassert> // for assert
+#include <cstddef> // for size_t
+#include <utility> // for move
+#include <vector> // for vector, vector<>::size_type
+
+namespace rawspeed {
+
+PhaseOneDecompressor::PhaseOneDecompressor(const RawImage& img,
+ std::vector<PhaseOneStrip>&& strips_)
+ : AbstractParallelizedDecompressor(img), strips(std::move(strips_)) {
+ if (mRaw->getDataType() != TYPE_USHORT16)
+ ThrowRDE("Unexpected data type");
+
+ if (!((mRaw->getCpp() == 1 && mRaw->getBpp() == 2)))
+ ThrowRDE("Unexpected cpp: %u", mRaw->getCpp());
+
+ if (!mRaw->dim.hasPositiveArea() || mRaw->dim.x % 2 != 0 ||
+ mRaw->dim.x > 11608 || mRaw->dim.y > 8708) {
+ ThrowRDE("Unexpected image dimensions found: (%u; %u)", mRaw->dim.x,
+ mRaw->dim.y);
+ }
+
+ validateStrips();
+}
+
+void PhaseOneDecompressor::validateStrips() const {
+ // The 'strips' vector should contain exactly one element per row of image.
+
+ // If the length is different, then the 'strips' vector is clearly incorrect.
+ if (strips.size() != static_cast<decltype(strips)::size_type>(mRaw->dim.y)) {
+ ThrowRDE("Height (%u) vs strip count %zu mismatch", mRaw->dim.y,
+ strips.size());
+ }
+
+ struct RowBin {
+ using value_type = unsigned char;
+ bool isEmpty() const { return data == 0; }
+ void fill() { data = 1; }
+ value_type data = 0;
+ };
+
+ // Now, the strips in the 'strips' vector aren't in order.
+ // The 'decltype(strips)::value_type::n' is the row number of a strip.
+ // We need to make sure that we have every row (0..mRaw->dim.y-1), once.
+
+ // There are many ways to do that. Here, we take the histogram of all the
+ // row numbers, and if any bin ends up not being '1' (one strip per row),
+ // then the input is bad.
+ std::vector<RowBin> histogram;
+ histogram.resize(strips.size());
+ int numBinsFilled = 0;
+ std::for_each(strips.begin(), strips.end(),
+ [y = mRaw->dim.y, &histogram,
+ &numBinsFilled](const PhaseOneStrip& strip) {
+ if (strip.n < 0 || strip.n >= y)
+ ThrowRDE("Strip specifies out-of-bounds row %u", strip.n);
+ RowBin& rowBin = histogram[strip.n];
+ if (!rowBin.isEmpty())
+ ThrowRDE("Duplicate row %u", strip.n);
+ rowBin.fill();
+ numBinsFilled++;
+ });
+ assert(histogram.size() == strips.size());
+ assert(numBinsFilled == mRaw->dim.y &&
+ "We should only get here if all the rows/bins got filled.");
+}
+
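validateStrips() above checks that the unordered strip list covers every image row exactly once by histogramming the row numbers. The same invariant as a hedged standalone predicate (stripRows stands in for the PhaseOneStrip::n values):

    #include <cstddef>
    #include <vector>

    bool rowsCoverImageExactlyOnce(const std::vector<int>& stripRows, int height) {
      if (stripRows.size() != static_cast<size_t>(height))
        return false;
      std::vector<bool> seen(height, false);
      for (int n : stripRows) {
        if (n < 0 || n >= height || seen[n])
          return false; // out-of-bounds or duplicate row
        seen[n] = true;
      }
      return true; // every bin filled exactly once
    }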
+void PhaseOneDecompressor::decompressStrip(const PhaseOneStrip& strip) const {
+ uint32 width = mRaw->dim.x;
+ assert(width % 2 == 0);
+
+ static constexpr std::array<const int, 10> length = {8, 7, 6, 9, 11,
+ 10, 5, 12, 14, 13};
+
+ BitPumpMSB32 pump(strip.bs);
+
+ std::array<int32, 2> pred;
+ pred.fill(0);
+ std::array<int, 2> len;
+ auto* img = reinterpret_cast<ushort16*>(mRaw->getData(0, strip.n));
+ for (uint32 col = 0; col < width; col++) {
+ if (col >= (width & ~7U)) // last 'width % 8' pixels.
+ len[0] = len[1] = 14;
+ else if ((col & 7) == 0) {
+ for (int& i : len) {
+ int j = 0;
+
+ for (; j < 5; j++) {
+ if (pump.getBits(1) != 0) {
+ if (col == 0)
+ ThrowRDE("Can not initialize lengths. Data is corrupt.");
+
+ // else, we have previously initialized lengths, so we are fine
+ break;
+ }
+ }
+
+ assert((col == 0 && j > 0) || col != 0);
+ if (j > 0)
+ i = length[2 * (j - 1) + pump.getBits(1)];
+ }
+ }
+
+ int i = len[col & 1];
+ if (i == 14)
+ img[col] = pred[col & 1] = pump.getBits(16);
+ else {
+ pred[col & 1] +=
+ static_cast<signed>(pump.getBits(i)) + 1 - (1 << (i - 1));
+ // FIXME: is the truncation the right solution here?
+ img[col] = ushort16(pred[col & 1]);
+ }
+ }
+}
+
+void PhaseOneDecompressor::decompressThreaded(
+ const RawDecompressorThread* t) const {
+ for (size_t i = t->start; i < t->end && i < strips.size(); i++)
+ decompressStrip(strips[i]);
+}
+
+} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/PhaseOneDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/PhaseOneDecompressor.h
new file mode 100644
index 000000000..e7b9d3f81
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/PhaseOneDecompressor.h
@@ -0,0 +1,55 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2009-2014 Klaus Post
+ Copyright (C) 2014 Pedro Côrte-Real
+ Copyright (C) 2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#pragma once
+
+#include "decompressors/AbstractParallelizedDecompressor.h" // for Abstract...
+#include "io/ByteStream.h" // for ByteStream
+#include <utility> // for move
+#include <vector> // for vector
+
+namespace rawspeed {
+
+class RawImage;
+
+struct PhaseOneStrip {
+ const int n;
+ const ByteStream bs;
+
+ PhaseOneStrip(int block, ByteStream bs_) : n(block), bs(std::move(bs_)) {}
+};
+
+class PhaseOneDecompressor final : public AbstractParallelizedDecompressor {
+ std::vector<PhaseOneStrip> strips;
+
+ void decompressStrip(const PhaseOneStrip& strip) const;
+
+ void decompressThreaded(const RawDecompressorThread* t) const final;
+
+ void validateStrips() const;
+
+public:
+ PhaseOneDecompressor(const RawImage& img,
+ std::vector<PhaseOneStrip>&& strips_);
+};
+
+} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/SamsungV0Decompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/SamsungV0Decompressor.cpp
index da31936bb..b67659580 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/SamsungV0Decompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/SamsungV0Decompressor.cpp
@@ -21,17 +21,12 @@
*/
#include "decompressors/SamsungV0Decompressor.h"
-#include "common/Common.h" // for ushort16, uint32, int32
+#include "common/Common.h" // for uint32, ushort16, int32
#include "common/Point.h" // for iPoint2D
#include "common/RawImage.h" // for RawImage, RawImageData
#include "decoders/RawDecoderException.h" // for ThrowRDE
#include "io/BitPumpMSB32.h" // for BitPumpMSB32
-#include "io/Buffer.h" // for Buffer
#include "io/ByteStream.h" // for ByteStream
-#include "io/Endianness.h" // for Endianness, Endianness::li...
-#include "tiff/TiffEntry.h" // for TiffEntry
-#include "tiff/TiffIFD.h" // for TiffIFD
-#include "tiff/TiffTag.h" // for TiffTag, TiffTag::IMAGELENGTH
#include <algorithm> // for max
#include <cassert> // for assert
#include <iterator> // for advance, begin, end, next
@@ -122,7 +117,7 @@ void SamsungV0Decompressor::decompressStrip(uint32 y,
BitPumpMSB32 bits(bs);
- int len[4];
+ std::array<int, 4> len;
for (int& i : len)
i = y < 2 ? 7 : 4;
@@ -139,7 +134,7 @@ void SamsungV0Decompressor::decompressStrip(uint32 y,
bits.fill();
bool dir = !!bits.getBitsNoFill(1);
- int op[4];
+ std::array<int, 4> op;
for (int& i : op)
i = bits.getBitsNoFill(2);
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/SamsungV0Decompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/SamsungV0Decompressor.h
index da840ee7b..958123c1d 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/SamsungV0Decompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/SamsungV0Decompressor.h
@@ -28,9 +28,7 @@
namespace rawspeed {
-class Buffer;
class RawImage;
-class TiffIFD;
// Decoder for compressed srw files (NX300 and later)
class SamsungV0Decompressor final : public AbstractSamsungDecompressor {
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/SamsungV1Decompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/SamsungV1Decompressor.cpp
index 47e4fa285..a1aa17904 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/SamsungV1Decompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/SamsungV1Decompressor.cpp
@@ -84,12 +84,23 @@ void SamsungV1Decompressor::decompress() {
// encode, the second the number of bits that come after with the difference
// The table has 14 entries because the difference can have between 0 (no
// difference) and 13 bits (differences between 12 bits numbers can need 13)
- const uchar8 tab[14][2] = {{3, 4}, {3, 7}, {2, 6}, {2, 5}, {4, 3},
- {6, 0}, {7, 9}, {8, 10}, {9, 11}, {10, 12},
- {10, 13}, {5, 1}, {4, 8}, {4, 2}};
+ static const std::array<std::array<uchar8, 2>, 14> tab = {{{3, 4},
+ {3, 7},
+ {2, 6},
+ {2, 5},
+ {4, 3},
+ {6, 0},
+ {7, 9},
+ {8, 10},
+ {9, 11},
+ {10, 12},
+ {10, 13},
+ {5, 1},
+ {4, 8},
+ {4, 2}}};
std::vector<encTableItem> tbl(1024);
- ushort16 vpred[2][2] = {{0, 0}, {0, 0}};
- ushort16 hpred[2];
+ std::array<std::array<ushort16, 2>, 2> vpred = {{}};
+ std::array<ushort16, 2> hpred;
// We generate a 1024 entry table (to be addressed by reading 10 bits) by
// consecutively filling in 2^(10-N) positions where N is the variable number
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/SamsungV2Decompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/SamsungV2Decompressor.cpp
index 49b3a33e3..b3f71c922 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/SamsungV2Decompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/SamsungV2Decompressor.cpp
@@ -202,7 +202,7 @@ void SamsungV2Decompressor::decompressRow(uint32 row) {
// By default we are not scaling values at all
int32 scale = 0;
- uint32 diffBitsMode[3][2] = {{0}};
+ std::array<std::array<int, 2>, 3> diffBitsMode = {{}};
for (auto& i : diffBitsMode)
i[0] = i[1] = (row == 0 || row == 1) ? 7 : 4;
@@ -210,7 +210,7 @@ void SamsungV2Decompressor::decompressRow(uint32 row) {
assert(width % 16 == 0);
for (uint32 col = 0; col < width; col += 16) {
if (!(optflags & OptFlags::QP) && !(col & 63)) {
- static constexpr int32 scalevals[] = {0, -2, 2};
+ static constexpr std::array<int32, 3> scalevals = {{0, -2, 2}};
uint32 i = pump.getBits(2);
scale = i < 3 ? scale + scalevals[i] : pump.getBits(12);
}
@@ -236,8 +236,10 @@ void SamsungV2Decompressor::decompressRow(uint32 row) {
ThrowRDE(
"Got a previous line lookup on first two lines. File corrupted?");
- static constexpr int32 motionOffset[7] = {-4, -2, -2, 0, 0, 2, 4};
- static constexpr int32 motionDoAverage[7] = {0, 0, 1, 0, 1, 0, 0};
+ static constexpr std::array<int32, 7> motionOffset = {-4, -2, -2, 0,
+ 0, 2, 4};
+ static constexpr std::array<int32, 7> motionDoAverage = {0, 0, 1, 0,
+ 1, 0, 0};
int32 slideOffset = motionOffset[motion];
int32 doAverage = motionDoAverage[motion];
@@ -273,9 +275,9 @@ void SamsungV2Decompressor::decompressRow(uint32 row) {
}
// Figure out how many difference bits we have to read for each pixel
- uint32 diffBits[4] = {0};
+ std::array<uint32, 4> diffBits = {};
if (optflags & OptFlags::SKIP || !pump.getBits(1)) {
- uint32 flags[4];
+ std::array<uint32, 4> flags;
for (unsigned int& flag : flags)
flag = pump.getBits(2);
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/SonyArw1Decompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/SonyArw1Decompressor.cpp
index 0d6bddc30..1cbd2360b 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/SonyArw1Decompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/SonyArw1Decompressor.cpp
@@ -21,14 +21,13 @@
*/
#include "decompressors/SonyArw1Decompressor.h"
-#include "common/Common.h" // for uchar8
-#include "common/Point.h" // for iPoint2D
-#include "common/RawImage.h" // for RawImage
-#include "decoders/RawDecoderException.h" // for ThrowRDE
-#include "decompressors/AbstractDecompressor.h" // for RawDecom...
-#include "decompressors/HuffmanTable.h" // for HuffmanTable
-#include "io/BitPumpMSB.h" // for BitPumpMSB
-#include <algorithm> // for move
+#include "common/Common.h" // for uint32, uchar8, ushort16
+#include "common/Point.h" // for iPoint2D
+#include "common/RawImage.h" // for RawImage, RawImageData
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "decompressors/HuffmanTable.h" // for HuffmanTable
+#include "io/BitPumpMSB.h" // for BitPumpMSB
+#include <cassert> // for assert
namespace rawspeed {
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/SonyArw1Decompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/SonyArw1Decompressor.h
index 67a158675..1be0ccb8a 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/SonyArw1Decompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/SonyArw1Decompressor.h
@@ -22,11 +22,10 @@
#include "common/RawImage.h" // for RawImage
#include "decompressors/AbstractDecompressor.h" // for AbstractDecompressor
-#include "io/ByteStream.h" // for ByteStream
namespace rawspeed {
-class RawImage;
+class ByteStream;
class SonyArw1Decompressor final : public AbstractDecompressor {
RawImage mRaw;
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/SonyArw2Decompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/SonyArw2Decompressor.cpp
index 231b23a72..8465ed013 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/SonyArw2Decompressor.cpp
+++ b/src/external/rawspeed/src/librawspeed/decompressors/SonyArw2Decompressor.cpp
@@ -21,13 +21,13 @@
*/
#include "decompressors/SonyArw2Decompressor.h"
-#include "common/Common.h" // for uchar8
+#include "common/Common.h" // for uint32
#include "common/Point.h" // for iPoint2D
#include "common/RawImage.h" // for RawImage
-#include "decoders/RawDecoderException.h" // for ThrowRDE, ...
+#include "decoders/RawDecoderException.h" // for ThrowRDE
#include "decompressors/AbstractParallelizedDecompressor.h" // for RawDecom...
#include "io/BitPumpLSB.h" // for BitPumpLSB
-#include <algorithm> // for move
+#include <cassert> // for assert
namespace rawspeed {
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/UncompressedDecompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/UncompressedDecompressor.h
index 6e625137c..8d70c19a3 100644
--- a/src/external/rawspeed/src/librawspeed/decompressors/UncompressedDecompressor.h
+++ b/src/external/rawspeed/src/librawspeed/decompressors/UncompressedDecompressor.h
@@ -28,7 +28,7 @@
#include "io/Buffer.h" // for Buffer, Buffer::size_type
#include "io/ByteStream.h" // for ByteStream
#include "io/Endianness.h" // for Endianness
-#include <algorithm> // for move
+#include <utility> // for move
namespace rawspeed {
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/VC5Decompressor.cpp b/src/external/rawspeed/src/librawspeed/decompressors/VC5Decompressor.cpp
new file mode 100644
index 000000000..b6ffde7bc
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/VC5Decompressor.cpp
@@ -0,0 +1,829 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2018 Stefan Löffler
+ Copyright (C) 2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+/*
+  This is a decompressor for the VC-5 raw compression algorithm, as used in
+  GoPro raws. This implementation is similar to that of the official reference
+  implementation in the https://github.com/gopro/gpr project, and produces
+  bitwise-identical output compared with the Adobe DNG Converter
+  implementation.
+ */
+
+#include "rawspeedconfig.h"
+#include "decompressors/VC5Decompressor.h"
+#include "common/Array2DRef.h" // for Array2DRef
+#include "common/Optional.h" // for Optional
+#include "common/Point.h" // for iPoint2D
+#include "common/RawspeedException.h" // for RawspeedException
+#include "common/SimpleLUT.h" // for SimpleLUT, SimpleLUT<>::va...
+#include "decoders/RawDecoderException.h" // for ThrowRDE
+#include "io/Endianness.h" // for Endianness, Endianness::big
+#include <cassert> // for assert
+#include <cmath> // for pow
+#include <initializer_list> // for initializer_list
+#include <limits> // for numeric_limits
+#include <string> // for string
+#include <utility> // for move
+
+namespace {
+
+// Definitions needed by table17.inc
+// Taken from
+// https://github.com/gopro/gpr/blob/a513701afce7b03173213a2f67dfd9dd28fa1868/source/lib/vc5_decoder/vlc.h
+struct RLV {
+ uint_fast8_t size; //!< Size of code word in bits
+ uint32_t bits; //!< Code word bits right justified
+ uint16_t count; //!< Run length
+ uint16_t value; //!< Run value (unsigned)
+};
+#define RLVTABLE(n) \
+ struct { \
+ const uint32_t length; \
+ const RLV entries[n]; \
+ } constexpr
+#include "gopro/vc5/table17.inc"
+
+constexpr int16_t decompand(int16_t val) {
+ double c = val;
+ // Invert companding curve
+ c += (c * c * c * 768) / (255. * 255. * 255.);
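+  // E.g. decompand(255) == 255 + 768 == 1023.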
+ if (c > std::numeric_limits<int16_t>::max())
+ return std::numeric_limits<int16_t>::max();
+ if (c < std::numeric_limits<int16_t>::min())
+ return std::numeric_limits<int16_t>::min();
+ return c;
+}
+
+#ifndef NDEBUG
+int ignore = []() {
+ for (const RLV& entry : table17.entries) {
+ assert(((-decompand(entry.value)) == decompand(-int16_t(entry.value))) &&
+ "negation of decompanded value is the same as decompanding of "
+ "negated value");
+ }
+ return 0;
+}();
+#endif
+
+const std::array<RLV, table17.length> decompandedTable17 = []() {
+ std::array<RLV, table17.length> d;
+ for (auto i = 0U; i < table17.length; i++) {
+ d[i] = table17.entries[i];
+ d[i].value = decompand(table17.entries[i].value);
+ }
+ return d;
+}();
+
+} // namespace
+
+#define PRECISION_MIN 8
+#define PRECISION_MAX 32
+
+#define MARKER_BAND_END 1
+
+namespace rawspeed {
+
+void VC5Decompressor::Wavelet::setBandValid(const int band) {
+ mDecodedBandMask |= (1 << band);
+}
+
+bool VC5Decompressor::Wavelet::isBandValid(const int band) const {
+ return mDecodedBandMask & (1 << band);
+}
+
+bool VC5Decompressor::Wavelet::allBandsValid() const {
+ return mDecodedBandMask == static_cast<uint32>((1 << numBands) - 1);
+}
+
+Array2DRef<const int16_t>
+VC5Decompressor::Wavelet::bandAsArray2DRef(const unsigned int iBand) const {
+ return {bands[iBand]->data.data(), width, height};
+}
+
+namespace {
+auto convolute = [](int x, int y, std::array<int, 4> muls,
+ const Array2DRef<const int16_t> high, auto lowGetter,
+ int DescaleShift = 0) {
+ auto highCombined = muls[0] * high(x, y);
+ auto lowsCombined = [muls, lowGetter]() {
+ int lows = 0;
+ for (int i = 0; i < 3; i++)
+ lows += muls[1 + i] * lowGetter(i);
+ return lows;
+ }();
+  // Round 'lows' up.
+ lowsCombined += 4;
+ // And finally 'average' them.
+ auto lowsRounded = lowsCombined >> 3;
+ auto total = highCombined + lowsRounded;
+ // Descale it.
+ total <<= DescaleShift;
+ // And average it.
+ total >>= 1;
+ return total;
+};
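+// Each convolute() call produces one output sample: the weighted high-pass
+// coefficient plus a rounded (divide-by-8) 3-tap weighted sum of low-pass
+// coefficients, shifted left by DescaleShift and finally halved.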
+
+struct ConvolutionParams {
+ struct First {
+ static constexpr std::array<int, 4> mul_even = {+1, +11, -4, +1};
+ static constexpr std::array<int, 4> mul_odd = {-1, +5, +4, -1};
+ static constexpr int coord_shift = 0;
+ };
+ static constexpr First First{};
+
+ struct Middle {
+ static constexpr std::array<int, 4> mul_even = {+1, +1, +8, -1};
+ static constexpr std::array<int, 4> mul_odd = {-1, -1, +8, +1};
+ static constexpr int coord_shift = -1;
+ };
+ static constexpr Middle Middle{};
+
+ struct Last {
+ static constexpr std::array<int, 4> mul_even = {+1, -1, +4, +5};
+ static constexpr std::array<int, 4> mul_odd = {-1, +1, -4, +11};
+ static constexpr int coord_shift = -2;
+ };
+ static constexpr Last Last{};
+};
+
+constexpr std::array<int, 4> ConvolutionParams::First::mul_even;
+constexpr std::array<int, 4> ConvolutionParams::First::mul_odd;
+
+constexpr std::array<int, 4> ConvolutionParams::Middle::mul_even;
+constexpr std::array<int, 4> ConvolutionParams::Middle::mul_odd;
+
+constexpr std::array<int, 4> ConvolutionParams::Last::mul_even;
+constexpr std::array<int, 4> ConvolutionParams::Last::mul_odd;
+
+} // namespace
+
+void VC5Decompressor::Wavelet::reconstructPass(
+ const Array2DRef<int16_t> dst, const Array2DRef<const int16_t> high,
+ const Array2DRef<const int16_t> low) const noexcept {
+ auto process = [low, high, dst](auto segment, int x, int y) {
+ auto lowGetter = [&x, &y, low](int delta) {
+ return low(x, y + decltype(segment)::coord_shift + delta);
+ };
+ auto convolution = [&x, &y, high, lowGetter](std::array<int, 4> muls) {
+ return convolute(x, y, muls, high, lowGetter, /*DescaleShift*/ 0);
+ };
+
+ int even = convolution(decltype(segment)::mul_even);
+ int odd = convolution(decltype(segment)::mul_odd);
+
+ dst(x, 2 * y) = static_cast<int16_t>(even);
+ dst(x, 2 * y + 1) = static_cast<int16_t>(odd);
+ };
+
+ // Vertical reconstruction
+#ifdef HAVE_OPENMP
+#pragma omp for schedule(static)
+#endif
+ for (int y = 0; y < height; ++y) {
+ if (y == 0) {
+ // 1st row
+ for (int x = 0; x < width; ++x)
+ process(ConvolutionParams::First, x, y);
+ } else if (y + 1 < height) {
+ // middle rows
+ for (int x = 0; x < width; ++x)
+ process(ConvolutionParams::Middle, x, y);
+ } else {
+ // last row
+ for (int x = 0; x < width; ++x)
+ process(ConvolutionParams::Last, x, y);
+ }
+ }
+}
+
+void VC5Decompressor::Wavelet::combineLowHighPass(
+ const Array2DRef<int16_t> dst, const Array2DRef<const int16_t> low,
+ const Array2DRef<const int16_t> high, int descaleShift,
+ bool clampUint = false) const noexcept {
+ auto process = [low, high, descaleShift, clampUint, dst](auto segment, int x,
+ int y) {
+ auto lowGetter = [&x, &y, low](int delta) {
+ return low(x + decltype(segment)::coord_shift + delta, y);
+ };
+ auto convolution = [&x, &y, high, lowGetter,
+ descaleShift](std::array<int, 4> muls) {
+ return convolute(x, y, muls, high, lowGetter, descaleShift);
+ };
+
+ int even = convolution(decltype(segment)::mul_even);
+ int odd = convolution(decltype(segment)::mul_odd);
+
+ if (clampUint) {
+ even = clampBits(even, 14);
+ odd = clampBits(odd, 14);
+ }
+ dst(2 * x, y) = static_cast<int16_t>(even);
+ dst(2 * x + 1, y) = static_cast<int16_t>(odd);
+ };
+
+ // Horizontal reconstruction
+#ifdef HAVE_OPENMP
+#pragma omp for schedule(static)
+#endif
+ for (int y = 0; y < dst.height; ++y) {
+ // First col
+ int x = 0;
+ process(ConvolutionParams::First, x, y);
+ // middle cols
+ for (x = 1; x + 1 < width; ++x) {
+ process(ConvolutionParams::Middle, x, y);
+ }
+ // last col
+ process(ConvolutionParams::Last, x, y);
+ }
+}
+
+void VC5Decompressor::Wavelet::ReconstructableBand::processLow(
+ const Wavelet& wavelet) noexcept {
+ Array2DRef<int16_t> lowpass;
+#ifdef HAVE_OPENMP
+#pragma omp single copyprivate(lowpass)
+#endif
+ lowpass = Array2DRef<int16_t>::create(&lowpass_storage, wavelet.width,
+ 2 * wavelet.height);
+
+ const Array2DRef<const int16_t> highlow = wavelet.bandAsArray2DRef(2);
+ const Array2DRef<const int16_t> lowlow = wavelet.bandAsArray2DRef(0);
+
+ // Reconstruct the "immediates", the actual low pass ...
+ wavelet.reconstructPass(lowpass, highlow, lowlow);
+}
+
+void VC5Decompressor::Wavelet::ReconstructableBand::processHigh(
+ const Wavelet& wavelet) noexcept {
+ Array2DRef<int16_t> highpass;
+#ifdef HAVE_OPENMP
+#pragma omp single copyprivate(highpass)
+#endif
+ highpass = Array2DRef<int16_t>::create(&highpass_storage, wavelet.width,
+ 2 * wavelet.height);
+
+ const Array2DRef<const int16_t> highhigh = wavelet.bandAsArray2DRef(3);
+ const Array2DRef<const int16_t> lowhigh = wavelet.bandAsArray2DRef(1);
+
+ wavelet.reconstructPass(highpass, highhigh, lowhigh);
+}
+
+void VC5Decompressor::Wavelet::ReconstructableBand::combine(
+ const Wavelet& wavelet) noexcept {
+ int16_t descaleShift = (wavelet.prescale == 2 ? 2 : 0);
+
+ Array2DRef<int16_t> dest;
+#ifdef HAVE_OPENMP
+#pragma omp single copyprivate(dest)
+#endif
+ dest =
+ Array2DRef<int16_t>::create(&data, 2 * wavelet.width, 2 * wavelet.height);
+
+ const Array2DRef<int16_t> lowpass(lowpass_storage.data(), wavelet.width,
+ 2 * wavelet.height);
+ const Array2DRef<int16_t> highpass(highpass_storage.data(), wavelet.width,
+ 2 * wavelet.height);
+
+ // And finally, combine the low pass, and high pass.
+ wavelet.combineLowHighPass(dest, lowpass, highpass, descaleShift, clampUint);
+}
+
+void VC5Decompressor::Wavelet::ReconstructableBand::decode(
+ const Wavelet& wavelet) noexcept {
+ assert(wavelet.allBandsValid());
+ assert(data.empty());
+ processLow(wavelet);
+ processHigh(wavelet);
+ combine(wavelet);
+}
+
+VC5Decompressor::VC5Decompressor(ByteStream bs, const RawImage& img)
+ : mRaw(img), mBs(std::move(bs)) {
+ if (!mRaw->dim.hasPositiveArea())
+ ThrowRDE("Bad image dimensions.");
+
+ if (mRaw->dim.x % mVC5.patternWidth != 0)
+ ThrowRDE("Width %u is not a multiple of %u", mRaw->dim.x,
+ mVC5.patternWidth);
+
+ if (mRaw->dim.y % mVC5.patternHeight != 0)
+ ThrowRDE("Height %u is not a multiple of %u", mRaw->dim.y,
+ mVC5.patternHeight);
+
+ // Initialize wavelet sizes.
+ for (Channel& channel : channels) {
+ channel.width = mRaw->dim.x / mVC5.patternWidth;
+ channel.height = mRaw->dim.y / mVC5.patternHeight;
+
+ uint16_t waveletWidth = channel.width;
+ uint16_t waveletHeight = channel.height;
+ for (Wavelet& wavelet : channel.wavelets) {
+ // Pad dimensions as necessary and divide them by two for the next wavelet
+ for (auto* dimension : {&waveletWidth, &waveletHeight})
+ *dimension = roundUpDivision(*dimension, 2);
+ wavelet.width = waveletWidth;
+ wavelet.height = waveletHeight;
+ }
+ }
+
+ if (img->whitePoint <= 0 || img->whitePoint > int(((1U << 16U) - 1U)))
+ ThrowRDE("Bad white level %i", img->whitePoint);
+
+ outputBits = 0;
+ for (int wp = img->whitePoint; wp != 0; wp >>= 1)
+ ++outputBits;
+ assert(outputBits <= 16);
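+  // E.g. a white level of 4095 yields outputBits == 12.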
+
+ parseVC5();
+}
+
+void VC5Decompressor::initVC5LogTable() {
+ mVC5LogTable = decltype(mVC5LogTable)(
+ [outputBits = outputBits](unsigned i, unsigned tableSize) {
+ // The vanilla "inverse log" curve for decoding.
+ auto normalizedCurve = [](auto normalizedI) {
+ return (std::pow(113.0, normalizedI) - 1) / 112.0;
+ };
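+        // At normalizedI == 1 this evaluates to (113 - 1) / 112 == 1, so the
+        // last table entry maps to the (rescaled) maximum output value.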
+
+ auto normalizeI = [tableSize](auto x) { return x / (tableSize - 1.0); };
+ auto denormalizeY = [maxVal = std::numeric_limits<ushort16>::max()](
+ auto y) { return maxVal * y; };
+ // Adjust for output whitelevel bitdepth.
+ auto rescaleY = [outputBits](auto y) {
+ auto scale = 16 - outputBits;
+ return y >> scale;
+ };
+
+ const auto naiveY = denormalizeY(normalizedCurve(normalizeI(i)));
+ const auto intY = static_cast<unsigned int>(naiveY);
+ const auto rescaledY = rescaleY(intY);
+ return rescaledY;
+ });
+}
+
+void VC5Decompressor::parseVC5() {
+ mBs.setByteOrder(Endianness::big);
+
+ assert(mRaw->dim.x > 0);
+ assert(mRaw->dim.y > 0);
+
+  // All VC-5 data must start with "VC-5" (0x56432d35)
+ if (mBs.getU32() != 0x56432d35)
+ ThrowRDE("not a valid VC-5 datablock");
+
+ bool done = false;
+ while (!done) {
+ auto tag = static_cast<VC5Tag>(mBs.getU16());
+ ushort16 val = mBs.getU16();
+
+ bool optional = matches(tag, VC5Tag::Optional);
+ if (optional)
+ tag = -tag;
+
+ switch (tag) {
+ case VC5Tag::ChannelCount:
+ if (val != numChannels)
+ ThrowRDE("Bad channel count %u, expected %u", val, numChannels);
+ break;
+ case VC5Tag::ImageWidth:
+ if (val != mRaw->dim.x)
+ ThrowRDE("Image width mismatch: %u vs %u", val, mRaw->dim.x);
+ break;
+ case VC5Tag::ImageHeight:
+ if (val != mRaw->dim.y)
+ ThrowRDE("Image height mismatch: %u vs %u", val, mRaw->dim.y);
+ break;
+ case VC5Tag::LowpassPrecision:
+ if (val < PRECISION_MIN || val > PRECISION_MAX)
+ ThrowRDE("Invalid precision %i", val);
+ mVC5.lowpassPrecision = val;
+ break;
+ case VC5Tag::ChannelNumber:
+ if (val >= numChannels)
+ ThrowRDE("Bad channel number (%u)", val);
+ mVC5.iChannel = val;
+ break;
+ case VC5Tag::ImageFormat:
+ if (val != mVC5.imgFormat)
+ ThrowRDE("Image format %i is not 4(RAW)", val);
+ break;
+ case VC5Tag::SubbandCount:
+ if (val != numSubbands)
+ ThrowRDE("Unexpected subband count %u, expected %u", val, numSubbands);
+ break;
+ case VC5Tag::MaxBitsPerComponent:
+ if (val != VC5_LOG_TABLE_BITWIDTH) {
+        ThrowRDE("Bad bits per component %u, not %u", val,
+ VC5_LOG_TABLE_BITWIDTH);
+ }
+ break;
+ case VC5Tag::PatternWidth:
+ if (val != mVC5.patternWidth)
+ ThrowRDE("Bad pattern width %u, not %u", val, mVC5.patternWidth);
+ break;
+ case VC5Tag::PatternHeight:
+ if (val != mVC5.patternHeight)
+ ThrowRDE("Bad pattern height %u, not %u", val, mVC5.patternHeight);
+ break;
+ case VC5Tag::SubbandNumber:
+ if (val >= numSubbands)
+ ThrowRDE("Bad subband number %u", val);
+ mVC5.iSubband = val;
+ break;
+ case VC5Tag::Quantization:
+ mVC5.quantization = static_cast<short16>(val);
+ break;
+ case VC5Tag::ComponentsPerSample:
+ if (val != mVC5.cps)
+      ThrowRDE("Bad component per sample count %u, not %u", val, mVC5.cps);
+ break;
+ case VC5Tag::PrescaleShift:
+ // FIXME: something is wrong. We get this before VC5Tag::ChannelNumber.
+ // Defaulting to 'mVC5.iChannel=0' seems to work *for existing samples*.
+ for (int iWavelet = 0; iWavelet < numWaveletLevels; ++iWavelet) {
+ auto& channel = channels[mVC5.iChannel];
+ auto& wavelet = channel.wavelets[iWavelet];
+ wavelet.prescale = (val >> (14 - 2 * iWavelet)) & 0x03;
+ }
+ break;
+ default: { // A chunk.
+ unsigned int chunkSize = 0;
+ if (matches(tag, VC5Tag::LARGE_CHUNK)) {
+ chunkSize = static_cast<unsigned int>(
+ ((static_cast<std::underlying_type<VC5Tag>::type>(tag) & 0xff)
+ << 16) |
+ (val & 0xffff));
+ } else if (matches(tag, VC5Tag::SMALL_CHUNK)) {
+ chunkSize = (val & 0xffff);
+ }
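+      // Note: chunk sizes are counted in 32-bit words, hence the element
+      // size of 4 passed to getStream() / skipBytes() below.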
+
+ if (is(tag, VC5Tag::LargeCodeblock)) {
+ parseLargeCodeblock(mBs.getStream(chunkSize, 4));
+ break;
+ }
+
+ // And finally, we got here if we didn't handle this tag/maybe-chunk.
+
+ // Magic, all the other 'large' chunks are actually optional,
+ // and don't specify any chunk bytes-to-be-skipped.
+ if (matches(tag, VC5Tag::LARGE_CHUNK)) {
+ optional = true;
+ chunkSize = 0;
+ }
+
+ if (!optional) {
+ ThrowRDE("Unknown (unhandled) non-optional Tag 0x%04hx",
+ static_cast<std::underlying_type<VC5Tag>::type>(tag));
+ }
+
+ if (chunkSize)
+ mBs.skipBytes(chunkSize, 4);
+
+ break;
+ }
+ }
+
+ done = true;
+ for (int iChannel = 0; iChannel < numChannels && done; ++iChannel) {
+ Wavelet& wavelet = channels[iChannel].wavelets[0];
+ if (!wavelet.allBandsValid())
+ done = false;
+ }
+ }
+}
+
+VC5Decompressor::Wavelet::LowPassBand::LowPassBand(const Wavelet& wavelet,
+ ByteStream bs_,
+ ushort16 lowpassPrecision_)
+ : AbstractDecodeableBand(std::move(bs_)),
+ lowpassPrecision(lowpassPrecision_) {
+  // The low-pass band is an uncompressed, hugely downscaled version of the
+  // image. It consists of width * height pixels of `lowpassPrecision` bits
+  // each, so we can easily check that we have enough bits to decode it.
+ const auto waveletArea = iPoint2D(wavelet.width, wavelet.height).area();
+ const auto bitsTotal = waveletArea * lowpassPrecision;
+ const auto bytesTotal = roundUpDivision(bitsTotal, 8);
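+  // (e.g. a 54x41 wavelet at 16-bit precision needs
+  //  roundUpDivision(54 * 41 * 16, 8) == 4428 bytes)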
+ bs = bs.getStream(bytesTotal); // And clamp the size while we are at it.
+}
+
+void VC5Decompressor::Wavelet::LowPassBand::decode(const Wavelet& wavelet) {
+ const auto dst =
+ Array2DRef<int16_t>::create(&data, wavelet.width, wavelet.height);
+
+ BitPumpMSB bits(bs);
+ for (auto row = 0; row < dst.height; ++row) {
+ for (auto col = 0; col < dst.width; ++col)
+ dst(col, row) = static_cast<int16_t>(bits.getBits(lowpassPrecision));
+ }
+}
+
+void VC5Decompressor::Wavelet::HighPassBand::decode(const Wavelet& wavelet) {
+ auto dequantize = [quant = quant](int16_t val) -> int16_t {
+ return val * quant;
+ };
+
+ Array2DRef<int16_t>::create(&data, wavelet.width, wavelet.height);
+
+ BitPumpMSB bits(bs);
+ // decode highpass band
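+  // The band is run-length coded: each RLV codeword decodes to a
+  // (count, value) pair and emits `count` copies of the dequantized value;
+  // a codeword with value == MARKER_BAND_END and count == 0 ends the band.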
+ int pixelValue = 0;
+ unsigned int count = 0;
+ int nPixels = wavelet.width * wavelet.height;
+ for (int iPixel = 0; iPixel < nPixels;) {
+ getRLV(&bits, &pixelValue, &count);
+ for (; count > 0; --count) {
+ if (iPixel >= nPixels)
+ ThrowRDE("Buffer overflow");
+ data[iPixel] = dequantize(pixelValue);
+ ++iPixel;
+ }
+ }
+ getRLV(&bits, &pixelValue, &count);
+  static_assert(decompand(MARKER_BAND_END) == MARKER_BAND_END, "passthrough");
+ if (pixelValue != MARKER_BAND_END || count != 0)
+ ThrowRDE("EndOfBand marker not found");
+}
+
+void VC5Decompressor::parseLargeCodeblock(const ByteStream& bs) {
+ static const auto subband_wavelet_index = []() {
+ std::array<int, numSubbands> wavelets;
+ int wavelet = 0;
+ for (auto i = wavelets.size() - 1; i > 0;) {
+ for (auto t = 0; t < numWaveletLevels; t++) {
+ wavelets[i] = wavelet;
+ i--;
+ }
+ if (i > 0)
+ wavelet++;
+ }
+ wavelets.front() = wavelet;
+ return wavelets;
+ }();
+ static const auto subband_band_index = []() {
+ std::array<int, numSubbands> bands;
+ bands.front() = 0;
+ for (auto i = 1U; i < bands.size();) {
+ for (int t = 1; t <= numWaveletLevels;) {
+ bands[i] = t;
+ t++;
+ i++;
+ }
+ }
+ return bands;
+ }();
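+  // Resulting mapping: subband 0 is the low-pass band (band 0) of the
+  // smallest wavelet (index 2); subbands 1-3 are its three high-pass bands;
+  // subbands 4-6 and 7-9 are the high-pass bands of wavelets 1 and 0.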
+
+ if (!mVC5.iSubband.hasValue())
+ ThrowRDE("Did not see VC5Tag::SubbandNumber yet");
+
+ const int idx = subband_wavelet_index[mVC5.iSubband.getValue()];
+ const int band = subband_band_index[mVC5.iSubband.getValue()];
+
+ auto& wavelets = channels[mVC5.iChannel].wavelets;
+
+ Wavelet& wavelet = wavelets[idx];
+ if (wavelet.isBandValid(band)) {
+ ThrowRDE("Band %u for wavelet %u on channel %u was already seen", band, idx,
+ mVC5.iChannel);
+ }
+
+ std::unique_ptr<Wavelet::AbstractBand>& dstBand = wavelet.bands[band];
+ if (mVC5.iSubband.getValue() == 0) {
+ assert(band == 0);
+ // low-pass band, only one, for the smallest wavelet, per channel per image
+ if (!mVC5.lowpassPrecision.hasValue())
+ ThrowRDE("Did not see VC5Tag::LowpassPrecision yet");
+ dstBand = std::make_unique<Wavelet::LowPassBand>(
+ wavelet, bs, mVC5.lowpassPrecision.getValue());
+ mVC5.lowpassPrecision.reset();
+ } else {
+ if (!mVC5.quantization.hasValue())
+ ThrowRDE("Did not see VC5Tag::Quantization yet");
+ dstBand = std::make_unique<Wavelet::HighPassBand>(
+ bs, mVC5.quantization.getValue());
+ mVC5.quantization.reset();
+ }
+ wavelet.setBandValid(band);
+
+ // If this wavelet is fully specified, mark the low-pass band of the
+ // next lower wavelet as specified.
+ if (idx > 0 && wavelet.allBandsValid()) {
+ Wavelet& nextWavelet = wavelets[idx - 1];
+ assert(!nextWavelet.isBandValid(0));
+ nextWavelet.bands[0] = std::make_unique<Wavelet::ReconstructableBand>();
+ nextWavelet.setBandValid(0);
+ }
+
+ mVC5.iSubband.reset();
+}
+
+void VC5Decompressor::decode(unsigned int offsetX, unsigned int offsetY,
+ unsigned int width, unsigned int height) {
+ if (offsetX || offsetY || mRaw->dim != iPoint2D(width, height))
+ ThrowRDE("VC5Decompressor expects to fill the whole image, not some tile.");
+
+ initVC5LogTable();
+
+ const std::vector<DecodeableBand> allDecodeableBands = [&]() {
+ std::vector<DecodeableBand> bands;
+ bands.reserve(numSubbandsTotal);
+ // All the high-pass bands for all wavelets,
+ // in this specific order of decreasing worksize.
+ for (int waveletLevel = 0; waveletLevel < numWaveletLevels;
+ waveletLevel++) {
+ for (auto channelId = 0; channelId < numChannels; channelId++) {
+ for (int bandId = 1; bandId <= numHighPassBands; bandId++) {
+ auto& channel = channels[channelId];
+ auto& wavelet = channel.wavelets[waveletLevel];
+ auto* band = wavelet.bands[bandId].get();
+ auto* decodeableHighPassBand =
+ dynamic_cast<Wavelet::HighPassBand*>(band);
+ bands.emplace_back(decodeableHighPassBand, wavelet);
+ }
+ }
+ }
+ // The low-pass bands at the end. I'm guessing they should be fast to
+ // decode.
+ for (Channel& channel : channels) {
+ // Low-pass band of the smallest wavelet.
+ Wavelet& smallestWavelet = channel.wavelets.back();
+ auto* decodeableLowPassBand =
+ dynamic_cast<Wavelet::LowPassBand*>(smallestWavelet.bands[0].get());
+ bands.emplace_back(decodeableLowPassBand, smallestWavelet);
+ }
+    assert(bands.size() == numSubbandsTotal);
+ return bands;
+ }();
+
+ const std::vector<ReconstructionStep> reconstructionSteps = [&]() {
+ std::vector<ReconstructionStep> steps;
+ steps.reserve(numLowPassBandsTotal);
+ // For every channel, recursively reconstruct the low-pass bands.
+ for (auto& channel : channels) {
+ // Reconstruct the intermediate lowpass bands.
+ for (int waveletLevel = numWaveletLevels - 1; waveletLevel > 0;
+ waveletLevel--) {
+ Wavelet* wavelet = &(channel.wavelets[waveletLevel]);
+ Wavelet& nextWavelet = channel.wavelets[waveletLevel - 1];
+
+ auto* band = dynamic_cast<Wavelet::ReconstructableBand*>(
+ nextWavelet.bands[0].get());
+ steps.emplace_back(wavelet, band);
+ }
+ // Finally, reconstruct the final lowpass band.
+ Wavelet* wavelet = &(channel.wavelets.front());
+ steps.emplace_back(wavelet, &(channel.band));
+ }
+ assert(steps.size() == numLowPassBandsTotal);
+ return steps;
+ }();
+
+#ifdef HAVE_OPENMP
+ bool exceptionThrown = false;
+#pragma omp parallel default(none) shared(exceptionThrown) \
+ num_threads(rawspeed_get_number_of_processor_cores())
+ {
+#endif
+#ifdef HAVE_OPENMP
+#pragma omp for schedule(dynamic, 1)
+#endif
+ for (auto decodeableBand = allDecodeableBands.begin();
+ decodeableBand < allDecodeableBands.end(); ++decodeableBand) {
+ try {
+ decodeableBand->band->decode(decodeableBand->wavelet);
+ } catch (RawspeedException& err) {
+ // Propagate the exception out of OpenMP magic.
+ mRaw->setError(err.what());
+#ifdef HAVE_OPENMP
+#pragma omp atomic write
+ exceptionThrown = true;
+#pragma omp cancel for
+#else
+ throw;
+#endif
+ }
+ }
+
+#ifdef HAVE_OPENMP
+#pragma omp cancel parallel if (exceptionThrown)
+
+  // OpenMP cancellation is usually disabled by default (OMP_CANCELLATION),
+  // so we can't just rely on it. Proceed only if decoding did not fail.
+ if (!exceptionThrown) {
+#endif
+ // And now, reconstruct the low-pass bands.
+ for (const ReconstructionStep& step : reconstructionSteps) {
+ step.band.decode(step.wavelet);
+
+#ifdef HAVE_OPENMP
+#pragma omp single nowait
+#endif
+ step.wavelet.clear(); // we no longer need it.
+ }
+
+ // And finally!
+ combineFinalLowpassBands();
+
+#ifdef HAVE_OPENMP
+ }
+ }
+
+ std::string firstErr;
+ if (mRaw->isTooManyErrors(1, &firstErr)) {
+ assert(exceptionThrown);
+ ThrowRDE("Too many errors encountered. Giving up. First Error:\n%s",
+ firstErr.c_str());
+ } else {
+ assert(!exceptionThrown);
+ }
+#endif
+}
+
+void VC5Decompressor::combineFinalLowpassBands() const noexcept {
+ const Array2DRef<uint16_t> out(reinterpret_cast<uint16_t*>(mRaw->getData()),
+ mRaw->dim.x, mRaw->dim.y,
+ mRaw->pitch / sizeof(uint16_t));
+
+ const int width = out.width / 2;
+ const int height = out.height / 2;
+
+ const Array2DRef<const int16_t> lowbands0 = Array2DRef<const int16_t>(
+ channels[0].band.data.data(), channels[0].width, channels[0].height);
+ const Array2DRef<const int16_t> lowbands1 = Array2DRef<const int16_t>(
+ channels[1].band.data.data(), channels[1].width, channels[1].height);
+ const Array2DRef<const int16_t> lowbands2 = Array2DRef<const int16_t>(
+ channels[2].band.data.data(), channels[2].width, channels[2].height);
+ const Array2DRef<const int16_t> lowbands3 = Array2DRef<const int16_t>(
+ channels[3].band.data.data(), channels[3].width, channels[3].height);
+
+ // Convert to RGGB output
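+  // The four per-channel low-pass bands are recombined into the RGGB pattern:
+  // r = gs + 2*rg, b = gs + 2*bg, g1 = gs + gd, g2 = gs - gd, where the
+  // difference channels are stored with a +2048 bias.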
+#ifdef HAVE_OPENMP
+#pragma omp for schedule(static) collapse(2)
+#endif
+ for (int row = 0; row < height; ++row) {
+ for (int col = 0; col < width; ++col) {
+ const int mid = 2048;
+
+ int gs = lowbands0(col, row);
+ int rg = lowbands1(col, row) - mid;
+ int bg = lowbands2(col, row) - mid;
+ int gd = lowbands3(col, row) - mid;
+
+ int r = gs + 2 * rg;
+ int b = gs + 2 * bg;
+ int g1 = gs + gd;
+ int g2 = gs - gd;
+
+ out(2 * col + 0, 2 * row + 0) = static_cast<uint16_t>(mVC5LogTable[r]);
+ out(2 * col + 1, 2 * row + 0) = static_cast<uint16_t>(mVC5LogTable[g1]);
+ out(2 * col + 0, 2 * row + 1) = static_cast<uint16_t>(mVC5LogTable[g2]);
+ out(2 * col + 1, 2 * row + 1) = static_cast<uint16_t>(mVC5LogTable[b]);
+ }
+ }
+}
+
+inline void VC5Decompressor::getRLV(BitPumpMSB* bits, int* value,
+ unsigned int* count) {
+ unsigned int iTab;
+
+ static constexpr auto maxBits = 1 + table17.entries[table17.length - 1].size;
+
+ // Ensure the maximum number of bits are cached to make peekBits() as fast as
+ // possible.
+ bits->fill(maxBits);
+ for (iTab = 0; iTab < table17.length; ++iTab) {
+ if (decompandedTable17[iTab].bits ==
+ bits->peekBitsNoFill(decompandedTable17[iTab].size))
+ break;
+ }
+ if (iTab >= table17.length)
+ ThrowRDE("Code not found in codebook");
+
+ bits->skipBitsNoFill(decompandedTable17[iTab].size);
+ *value = decompandedTable17[iTab].value;
+ *count = decompandedTable17[iTab].count;
+ if (*value != 0) {
+ if (bits->getBitsNoFill(1))
+ *value = -(*value);
+ }
+}
+
+} // namespace rawspeed
diff --git a/src/external/rawspeed/src/librawspeed/decompressors/VC5Decompressor.h b/src/external/rawspeed/src/librawspeed/decompressors/VC5Decompressor.h
new file mode 100644
index 000000000..268ed2eeb
--- /dev/null
+++ b/src/external/rawspeed/src/librawspeed/decompressors/VC5Decompressor.h
@@ -0,0 +1,229 @@
+/*
+ RawSpeed - RAW file decoder.
+
+ Copyright (C) 2018 Stefan Löffler
+ Copyright (C) 2018 Roman Lebedev
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#pragma once
+
+#include "common/Array2DRef.h" // for Array2DRef
+#include "common/Common.h" // for ushort16, short16
+#include "common/DefaultInitAllocatorAdaptor.h" // for DefaultInitAllocatorA...
+#include "common/Optional.h" // for Optional
+#include "common/RawImage.h" // for RawImage
+#include "common/SimpleLUT.h" // for SimpleLUT, SimpleLUT...
+#include "decompressors/AbstractDecompressor.h" // for AbstractDecompressor
+#include "io/BitPumpMSB.h" // for BitPumpMSB
+#include "io/ByteStream.h" // for ByteStream
+#include <array> // for array
+#include <cstdint> // for int16_t, uint16_t
+#include <memory> // for unique_ptr
+#include <type_traits> // for underlying_type, und...
+#include <utility> // for move
+#include <vector> // for vector
+
+namespace rawspeed {
+
+const int MAX_NUM_PRESCALE = 8;
+
+// Decompresses VC-5 as used by GoPro
+
+enum class VC5Tag : int16_t {
+ NoTag = 0x0, // synthetic, not an actual tag
+
+ ChannelCount = 0x000c,
+ ImageWidth = 0x0014,
+ ImageHeight = 0x0015,
+ LowpassPrecision = 0x0023,
+ SubbandCount = 0x000E,
+ SubbandNumber = 0x0030,
+ Quantization = 0x0035,
+ ChannelNumber = 0x003e,
+ ImageFormat = 0x0054,
+ MaxBitsPerComponent = 0x0066,
+ PatternWidth = 0x006a,
+ PatternHeight = 0x006b,
+ ComponentsPerSample = 0x006c,
+ PrescaleShift = 0x006d,
+
+ LARGE_CHUNK = 0x2000,
+ SMALL_CHUNK = 0x4000,
+ UniqueImageIdentifier = 0x4004,
+ LargeCodeblock = 0x6000,
+
+ Optional = int16_t(0x8000U), // only signbit set
+};
+inline VC5Tag operator&(VC5Tag LHS, VC5Tag RHS) {
+ using value_type = std::underlying_type<VC5Tag>::type;
+ return static_cast<VC5Tag>(static_cast<value_type>(LHS) &
+ static_cast<value_type>(RHS));
+}
+inline bool matches(VC5Tag LHS, VC5Tag RHS) {
+ // Are there any common bit set?
+ return (LHS & RHS) != VC5Tag::NoTag;
+}
+inline bool is(VC5Tag LHS, VC5Tag RHS) {
+ // Does LHS have all the RHS bits set?
+ return (LHS & RHS) == RHS;
+}
+inline VC5Tag operator-(VC5Tag tag) {
+ using value_type = std::underlying_type<VC5Tag>::type;
+ // Negate
+ return static_cast<VC5Tag>(-static_cast<value_type>(tag));
+}
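+// In the bitstream, an optional tag is stored as the arithmetic negation of
+// the corresponding required tag (hence its sign bit is set); parseVC5()
+// detects this via the Optional mask and negates the tag back.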
+
+class VC5Decompressor final : public AbstractDecompressor {
+ RawImage mRaw;
+ ByteStream mBs;
+
+ static constexpr auto VC5_LOG_TABLE_BITWIDTH = 12;
+ int outputBits;
+ SimpleLUT<unsigned, VC5_LOG_TABLE_BITWIDTH> mVC5LogTable;
+
+ void initVC5LogTable();
+
+ static constexpr int numWaveletLevels = 3;
+ static constexpr int numHighPassBands = 3;
+ static constexpr int numLowPassBands = 1;
+ static constexpr int numSubbands =
+ numLowPassBands + numHighPassBands * numWaveletLevels;
+
+ struct {
+ ushort16 iChannel = 0; // 0'th channel is the default
+ Optional<ushort16> iSubband;
+ Optional<ushort16> lowpassPrecision;
+ Optional<short16> quantization;
+
+ const ushort16 imgFormat = 4;
+ const ushort16 patternWidth = 2;
+ const ushort16 patternHeight = 2;
+ const ushort16 cps = 1;
+ } mVC5;
+
+ class Wavelet {
+ public:
+ int width, height;
+ int16_t prescale;
+
+ struct AbstractBand {
+ std::vector<int16_t, DefaultInitAllocatorAdaptor<int16_t>> data;
+ virtual ~AbstractBand() = default;
+ virtual void decode(const Wavelet& wavelet) = 0;
+ };
+ struct ReconstructableBand final : AbstractBand {
+ bool clampUint;
+ std::vector<int16_t, DefaultInitAllocatorAdaptor<int16_t>>
+ lowpass_storage;
+ std::vector<int16_t, DefaultInitAllocatorAdaptor<int16_t>>
+ highpass_storage;
+ explicit ReconstructableBand(bool clampUint_ = false)
+ : clampUint(clampUint_) {}
+ void processLow(const Wavelet& wavelet) noexcept;
+ void processHigh(const Wavelet& wavelet) noexcept;
+ void combine(const Wavelet& wavelet) noexcept;
+ void decode(const Wavelet& wavelet) noexcept final;
+ };
+ struct AbstractDecodeableBand : AbstractBand {
+ ByteStream bs;
+ explicit AbstractDecodeableBand(ByteStream bs_) : bs(std::move(bs_)) {}
+ };
+ struct LowPassBand final : AbstractDecodeableBand {
+ ushort16 lowpassPrecision;
+ LowPassBand(const Wavelet& wavelet, ByteStream bs_,
+ ushort16 lowpassPrecision_);
+ void decode(const Wavelet& wavelet) final;
+ };
+ struct HighPassBand final : AbstractDecodeableBand {
+ int16_t quant;
+ HighPassBand(ByteStream bs_, int16_t quant_)
+ : AbstractDecodeableBand(std::move(bs_)), quant(quant_) {}
+ void decode(const Wavelet& wavelet) final;
+ };
+
+ static constexpr uint16_t numBands = 4;
+ std::array<std::unique_ptr<AbstractBand>, numBands> bands;
+
+ void clear() {
+ for (auto& band : bands)
+ band.reset();
+ }
+
+ void setBandValid(int band);
+ bool isBandValid(int band) const;
+ uint32_t getValidBandMask() const { return mDecodedBandMask; }
+ bool allBandsValid() const;
+
+ void reconstructPass(Array2DRef<int16_t> dst,
+ Array2DRef<const int16_t> high,
+ Array2DRef<const int16_t> low) const noexcept;
+
+ void combineLowHighPass(Array2DRef<int16_t> dst,
+ Array2DRef<const int16_t> low,
+ Array2DRef<const int16_t> high, int descaleShift,
+ bool clampUint /*= false*/) const noexcept;
+
+ Array2DRef<const int16_t> bandAsArray2DRef(unsigned int iBand) const;
+
+ protected:
+ uint32 mDecodedBandMask = 0;
+ };
+
+ struct Channel {
+ std::array<Wavelet, numWaveletLevels> wavelets;
+
+ Wavelet::ReconstructableBand band{/*clampUint*/ true};
+ // the final lowband.
+ int width, height;
+ };
+
+ static constexpr int numChannels = 4;
+ static constexpr int numSubbandsTotal = numSubbands * numChannels;
+ static constexpr int numLowPassBandsTotal = numWaveletLevels * numChannels;
+ std::array<Channel, numChannels> channels;
+
+ struct DecodeableBand {
+ Wavelet::AbstractDecodeableBand* band;
+ const Wavelet& wavelet;
+ DecodeableBand(Wavelet::AbstractDecodeableBand* band_,
+ const Wavelet& wavelet_)
+ : band(band_), wavelet(wavelet_) {}
+ };
+ struct ReconstructionStep {
+ Wavelet& wavelet;
+ Wavelet::ReconstructableBand& band;
+ ReconstructionStep(Wavelet* wavelet_, Wavelet::ReconstructableBand* band_)
+ : wavelet(*wavelet_), band(*band_) {}
+ };
+
+ static inline void getRLV(BitPumpMSB* bits, int* value, unsigned int* count);
+
+ void parseLargeCodeblock(const ByteStream& bs);
+
+  // FIXME: this *should* be threadable nicely.
+ void combineFinalLowpassBands() const noexcept;
+
+ void parseVC5();
+
+public:
+ VC5Decompressor(ByteStream bs, const RawImage& img);
+
+ void decode(unsigned int offsetX, unsigned int offsetY, unsigned int width,
+ unsigned int height);
+};
+
+} // namespace rawspeed