#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#    Project: S I L X project
#             https://github.com/silx-kit/silx
#
#    Copyright (C) 2012-2017 European Synchrotron Radiation Facility, Grenoble, France
#
#    Principal author:       Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#

"""
Common OpenCL abstract base classes for different types of processing
"""

from __future__ import absolute_import, print_function, division


__author__ = "Jerome Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "03/10/2017"
__status__ = "stable"


import os
import logging
import gc
from collections import namedtuple
import numpy
import threading
from .common import ocl, pyopencl, release_cl_buffers, kernel_workgroup_size
from .utils import concatenate_cl_kernel


BufferDescription = namedtuple("BufferDescription", ["name", "size", "dtype", "flags"])
EventDescription = namedtuple("EventDescription", ["name", "event"])

logger = logging.getLogger(__name__)


class KernelContainer(object):
    """Those object holds a copy of all kernels accessible as attributes"""

    def __init__(self, program):
        """Constructor of the class

        :param program: the OpenCL program as generated by PyOpenCL
        """
        for kernel in program.all_kernels():
            self.__setattr__(kernel.function_name, kernel)

    def get_kernels(self):
        "return the dictionary with all kernels"
        return self.__dict__.copy()

    def get_kernel(self, name):
        "get a kernel from its name"
        return self.__dict__.get(name)
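
    # A minimal usage sketch (the program source and kernel name below are
    # illustrative, not taken from silx itself):
    #     program = pyopencl.Program(ctx, "__kernel void fill(__global float *a) {a[get_global_id(0)] = 0.0f;}").build()
    #     kernels = KernelContainer(program)
    #     kernels.fill                  # kernel accessible as an attribute
    #     kernels.get_kernel("fill")    # or retrieved by name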


class OpenclProcessing(object):
    """Abstract class for different types of OpenCL processing.

    This class provides:
    * Generation of the context, queues, profiling mode
    * Additional function to allocate/free all buffers declared as static attributes of the class
    * Functions to compile kernels, cache them and clean them
    * helper functions to clone the object
    """
    # Example of how to create an output buffer of 10 floats
    buffers = [BufferDescription("output", 10, numpy.float32, None),
               ]
    # list of kernel source files to be concatenated before compilation of the program
    kernel_files = []
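
    # A minimal subclassing sketch (class, file and kernel names below are
    # illustrative, not actual silx code):
    #     class MyProcessing(OpenclProcessing):
    #         buffers = [BufferDescription("output", 10, numpy.float32, None)]
    #         kernel_files = ["my_kernels.cl"]
    #
    #         def setup(self):
    #             self.compile_kernels()
    #             self.allocate_buffers()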

    def __init__(self, ctx=None, devicetype="all", platformid=None, deviceid=None,
                 block_size=None, profile=False):
        """Constructor of the abstract OpenCL processing class

        :param ctx: actual working context, left to None for automatic
                    initialization from device type or platformid/deviceid
        :param devicetype: type of device, can be "CPU", "GPU", "ACC" or "ALL"
        :param platformid: integer with the platform_identifier, as given by clinfo
        :param deviceid: Integer with the device identifier, as given by clinfo
        :param block_size: preferred workgroup size, may vary depending on the
                           outcome of the compilation
        :param profile: switch on profiling to be able to profile at the kernel
                        level, store profiling elements (makes code slightly slower)
        """
        self.sem = threading.Semaphore()
        self.profile = None
        self.events = []  # List of EventDescription, kept for profiling
        self.cl_mem = {}  # dict with all buffers allocated
        self.cl_program = None  # The actual OpenCL program
        self.cl_kernel_args = {}  # dict with all kernel arguments
        if ctx:
            self.ctx = ctx
        else:
            self.ctx = ocl.create_context(devicetype=devicetype, platformid=platformid, deviceid=deviceid)
        device_name = self.ctx.devices[0].name.strip()
        platform_name = self.ctx.devices[0].platform.name.strip()
        platform = ocl.get_platform(platform_name)
        self.device = platform.get_device(device_name)

        self.set_profiling(profile)
        self.block_size = block_size
        self.program = None
        self.kernels = None

    def __del__(self):
        """Destructor: release all buffers and programs
        """
        self.free_kernels()
        self.free_buffers()
        self.queue = None
        self.ctx = None
        gc.collect()

    def allocate_buffers(self, buffers=None):
        """
        Allocate OpenCL buffers required for a specific configuration

        :param buffers: a list of BufferDescriptions; leave to None for the
                        parametrized buffers declared in the class.

        Note that an OpenCL context also requires some memory, as do
        events and other OpenCL functionalities, which cannot be and are
        not taken into account here. The memory required by a context
        varies depending on the device: typically about 65MB for a
        GTX580 and ~15MB for a 9300m. In addition, a GPU will always
        have at least 3-5MB of memory in use. Unfortunately, OpenCL does
        NOT provide a built-in way to check the actual free memory on a
        device, only the total memory.
        """

        if buffers is None:
            buffers = self.buffers

        with self.sem:
            mem = {}

            # check if enough memory is available on the device
            ualloc = 0
            for buf in buffers:
                ualloc += numpy.dtype(buf.dtype).itemsize * buf.size
            logger.info("%.3fMB are needed on device: %s,  which has %.3fMB",
                        ualloc / 1.0e6, self.device, self.device.memory / 1.0e6)

            if ualloc >= self.device.memory:
                raise MemoryError("Fatal error in allocate_buffers. Not enough "
                                  " device memory for buffers (%lu requested, %lu available)"
                                  % (ualloc, self.device.memory))

            # do the allocation
            try:
                for buf in buffers:
                    size = numpy.dtype(buf.dtype).itemsize * buf.size
                    mem[buf.name] = pyopencl.Buffer(self.ctx, buf.flags, int(size))
            except pyopencl.MemoryError as error:
                release_cl_buffers(mem)
                raise MemoryError(error)

        self.cl_mem.update(mem)
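
    # A hedged usage sketch (``host_data`` is illustrative): once allocated,
    # buffers are reachable by name in ``cl_mem`` and can be filled with pyopencl:
    #     self.allocate_buffers()
    #     pyopencl.enqueue_copy(self.queue, self.cl_mem["output"], host_data)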

    def add_to_cl_mem(self, parrays):
        """
        Add pyopencl arrays, already allocated by pyopencl, to self.cl_mem.
        This should be used before calling allocate_buffers().

        :param parrays: a dictionary of `pyopencl.array.Array` or `pyopencl.Buffer`
        """
        self.cl_mem.update(parrays)
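
    # A hedged example (array name and shape are illustrative): register
    # externally allocated device arrays so that free_buffers() releases
    # them together with the buffers owned by this class:
    #     d_image = pyopencl.array.empty(self.queue, (2048, 2048), numpy.float32)
    #     self.add_to_cl_mem({"image": d_image})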

    def check_workgroup_size(self, kernel_name):
        """Store the maximum workgroup size usable with the given kernel
        as ``compiletime_workgroup_size``."""
        kernel = self.kernels.get_kernel(kernel_name)
        self.compiletime_workgroup_size = kernel_workgroup_size(self.program, kernel)

    def free_buffers(self):
        """free all device.memory allocated on the device
        """
        with self.sem:
            for key, buf in list(self.cl_mem.items()):
                if buf is not None:
                    if isinstance(buf, pyopencl.array.Array):
                        try:
                            buf.data.release()
                        except pyopencl.LogicError:
                            logger.error("Error while freeing buffer %s", key)
                    else:
                        try:
                            buf.release()
                        except pyopencl.LogicError:
                            logger.error("Error while freeing buffer %s", key)
                    self.cl_mem[key] = None

    def compile_kernels(self, kernel_files=None, compile_options=None):
        """Call the OpenCL compiler

        :param kernel_files: list of paths to the kernel files
            (by default, use the list declared in the class)
        :param compile_options: string of compile options
        """
        # concatenate all needed source files into a single OpenCL module
        kernel_files = kernel_files or self.kernel_files
        kernel_src = concatenate_cl_kernel(kernel_files)

        compile_options = compile_options or ""
        logger.info("Compiling file %s with options %s", kernel_files, compile_options)
        try:
            self.program = pyopencl.Program(self.ctx, kernel_src).build(options=compile_options)
        except (pyopencl.MemoryError, pyopencl.LogicError) as error:
            raise MemoryError(error)
        else:
            self.kernels = KernelContainer(self.program)
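
    # A hedged example (the kernel file, macro and kernel name are illustrative):
    #     self.compile_kernels(["my_kernels.cl"],
    #                          "-D WORKGROUP_SIZE=%s" % self.block_size)
    #     my_kernel = self.kernels.get_kernel("my_kernel")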

    def free_kernels(self):
        """Free all kernels
        """
        for kernel in self.cl_kernel_args:
            self.cl_kernel_args[kernel] = []
        self.kernels = None
        self.program = None

    def set_profiling(self, value=True):
        """Switch On/Off the profiling flag of the command queue to allow debugging

        :param value: set to True to enable profiling, or to False to disable it.
                      Without profiling, the processing is marginally faster

        Profiling information can then be retrieved with the 'log_profile' method
        """
        if bool(value) != self.profile:
            with self.sem:
                self.profile = bool(value)
                if self.profile:
                    self.queue = pyopencl.CommandQueue(self.ctx,
                        properties=pyopencl.command_queue_properties.PROFILING_ENABLE)
                else:
                    self.queue = pyopencl.CommandQueue(self.ctx)
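
    # Hedged sketch of how subclasses typically record events for profiling
    # (kernel name and launch arguments are illustrative):
    #     evt = self.program.my_kernel(self.queue, global_size, workgroup_size, *args)
    #     if self.profile:
    #         self.events.append(EventDescription("my_kernel", evt))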

    def log_profile(self):
        """If we are in profiling mode, prints out all timing for every single OpenCL call
        """
        t = 0.0
        out = ["", "Profiling info for OpenCL %s" % self.__class__.__name__]
        if self.profile:
            for e in self.events:
                if "__len__" in dir(e) and len(e) >= 2:
                    et = 1e-6 * (e[1].profile.end - e[1].profile.start)
                    out.append("%50s:\t%.3fms" % (e[0], et))
                    t += et

        out.append("_" * 80)
        out.append("%50s:\t%.3fms" % ("Total execution time", t))
        logger.info(os.linesep.join(out))
        return out

# This should be implemented by concrete classes
#     def __copy__(self):
#         """Shallow copy of the object
#
#         :return: copy of the object
#         """
#         return self.__class__((self._data, self._indices, self._indptr),
#                               self.size, block_size=self.BLOCK_SIZE,
#                               platformid=self.platform.id,
#                               deviceid=self.device.id,
#                               checksum=self.on_device.get("data"),
#                               profile=self.profile, empty=self.empty)
#
#     def __deepcopy__(self, memo=None):
#         """deep copy of the object
#
#         :return: deepcopy of the object
#         """
#         if memo is None:
#             memo = {}
#         new_csr = self._data.copy(), self._indices.copy(), self._indptr.copy()
#         memo[id(self._data)] = new_csr[0]
#         memo[id(self._indices)] = new_csr[1]
#         memo[id(self._indptr)] = new_csr[2]
#         new_obj = self.__class__(new_csr, self.size,
#                                  block_size=self.BLOCK_SIZE,
#                                  platformid=self.platform.id,
#                                  deviceid=self.device.id,
#                                  checksum=self.on_device.get("data"),
#                                  profile=self.profile, empty=self.empty)
#         memo[id(self)] = new_obj
#         return new_obj