
threading.hxx

/************************************************************************
 *
 *               Copyright 2013-2014 by Ullrich Koethe
 *
 *    This file is part of the VIGRA computer vision library.
 *    The VIGRA Website is
 *        http://hci.iwr.uni-heidelberg.de/vigra/
 *    Please direct questions, bug reports, and contributions to
 *        ullrich.koethe@iwr.uni-heidelberg.de or
 *        vigra@informatik.uni-hamburg.de
 *
 *    Permission is hereby granted, free of charge, to any person
 *    obtaining a copy of this software and associated documentation
 *    files (the "Software"), to deal in the Software without
 *    restriction, including without limitation the rights to use,
 *    copy, modify, merge, publish, distribute, sublicense, and/or
 *    sell copies of the Software, and to permit persons to whom the
 *    Software is furnished to do so, subject to the following
 *    conditions:
 *
 *    The above copyright notice and this permission notice shall be
 *    included in all copies or substantial portions of the Software.
 *
 *    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
 *    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 *    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 *    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 *    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 *    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 *    OTHER DEALINGS IN THE SOFTWARE.
 *
 ************************************************************************/

#ifndef VIGRA_THREADING_HXX
#define VIGRA_THREADING_HXX

/* Compatibility header to import threading-related functionality from boost
   when the compiler doesn't yet support C++11.
*/

// ignore all threading if VIGRA_SINGLE_THREADED is defined
#ifndef VIGRA_SINGLE_THREADED

#ifndef VIGRA_NO_STD_THREADING
#  if defined(__clang__)
#    if (!__has_include(<thread>) || !__has_include(<mutex>) || !__has_include(<atomic>))
#      define VIGRA_NO_STD_THREADING
#    endif
#  else
#    if defined(__GNUC__) && (!defined(_GLIBCXX_HAS_GTHREADS) || !defined(_GLIBCXX_USE_C99_STDINT_TR1) || !defined(_GLIBCXX_USE_SCHED_YIELD))
#      define VIGRA_NO_STD_THREADING
#    endif
#  endif

#  if defined(_MSC_VER) && _MSC_VER <= 1600
#    define VIGRA_NO_STD_THREADING
#  endif
#endif

#ifdef USE_BOOST_THREAD
   // Use the latest API version for Boost.Thread.
#  define BOOST_THREAD_VERSION 4
#  include <boost/thread.hpp>
#  if BOOST_VERSION >= 105300
     // At the moment, we only need the atomic headers, not the library.
#    define BOOST_ATOMIC_NO_LIB 1
#    include <boost/atomic.hpp>
#    define VIGRA_HAS_ATOMIC 1
#  endif
#  define VIGRA_THREADING_NAMESPACE boost
#elif defined(VIGRA_NO_STD_THREADING)
#  error "Your compiler does not support std::thread. If the boost libraries are available, consider running cmake with -DWITH_BOOST_THREAD=1"
#else
#  include <condition_variable>
#  include <future>
#  include <thread>
#  include <mutex>
// #  include <shared_mutex>  // C++14
#  include <atomic>
#  define VIGRA_HAS_ATOMIC 1
#  define VIGRA_THREADING_NAMESPACE std
#endif

#if defined(_MSC_VER) && !defined(VIGRA_HAS_ATOMIC)
#  include "windows.h"
#endif
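
// Summary of the configuration macros handled above (derived from the logic
// in this header; the cmake option is the one mentioned in the #error text,
// assuming the build system maps it to USE_BOOST_THREAD):
//
//   VIGRA_SINGLE_THREADED   - compile out all threading support
//   VIGRA_NO_STD_THREADING  - treat std::thread as unavailable
//                             (normally auto-detected for clang, gcc and MSVC)
//   USE_BOOST_THREAD        - import boost::thread / boost::atomic instead of
//                             the std equivalents, e.g. cmake -DWITH_BOOST_THREAD=1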

namespace vigra { namespace threading {

// contents of <thread>

using VIGRA_THREADING_NAMESPACE::thread;

namespace this_thread {

using VIGRA_THREADING_NAMESPACE::this_thread::yield;
using VIGRA_THREADING_NAMESPACE::this_thread::get_id;
using VIGRA_THREADING_NAMESPACE::this_thread::sleep_for;
using VIGRA_THREADING_NAMESPACE::this_thread::sleep_until;

} // namespace this_thread

// contents of <mutex>

using VIGRA_THREADING_NAMESPACE::mutex;
using VIGRA_THREADING_NAMESPACE::recursive_mutex;

#ifdef __APPLE__
#  ifdef __GNUC__
#    ifdef USE_BOOST_THREAD
        using VIGRA_THREADING_NAMESPACE::timed_mutex;
        using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;
#    endif
#  else
    using VIGRA_THREADING_NAMESPACE::timed_mutex;
    using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;
#  endif
#else
    using VIGRA_THREADING_NAMESPACE::timed_mutex;
    using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;
#endif

using VIGRA_THREADING_NAMESPACE::lock_guard;
using VIGRA_THREADING_NAMESPACE::unique_lock;

using VIGRA_THREADING_NAMESPACE::defer_lock_t;
using VIGRA_THREADING_NAMESPACE::try_to_lock_t;
using VIGRA_THREADING_NAMESPACE::adopt_lock_t;

using VIGRA_THREADING_NAMESPACE::defer_lock;
using VIGRA_THREADING_NAMESPACE::try_to_lock;
using VIGRA_THREADING_NAMESPACE::adopt_lock;

using VIGRA_THREADING_NAMESPACE::try_lock;
using VIGRA_THREADING_NAMESPACE::lock;

using VIGRA_THREADING_NAMESPACE::once_flag;
using VIGRA_THREADING_NAMESPACE::call_once;

// contents of <shared_mutex>

// using VIGRA_THREADING_NAMESPACE::shared_mutex;  // C++14
// using VIGRA_THREADING_NAMESPACE::shared_lock;   // C++14

// Futures.

using VIGRA_THREADING_NAMESPACE::future;

// Condition variables.

using VIGRA_THREADING_NAMESPACE::condition_variable;

// Packaged task.

using VIGRA_THREADING_NAMESPACE::packaged_task;

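/* Usage sketch (illustrative only; the names init_cache, cache_mutex and
   update_cache are made up for this example). The aliases above are meant to
   be drop-in stand-ins for their std:: or boost:: counterparts:

       void init_cache();                 // assumed to be declared elsewhere

       vigra::threading::mutex     cache_mutex;
       vigra::threading::once_flag cache_init_flag;

       void update_cache()
       {
           // run init_cache() exactly once, no matter how many threads get here
           vigra::threading::call_once(cache_init_flag, init_cache);

           // hold cache_mutex for the rest of this scope
           vigra::threading::lock_guard<vigra::threading::mutex> guard(cache_mutex);

           // ... modify the shared data structure while the mutex is held ...
       }
*/
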
#ifdef VIGRA_HAS_ATOMIC

// contents of <atomic>

using VIGRA_THREADING_NAMESPACE::atomic_flag;
using VIGRA_THREADING_NAMESPACE::atomic;

using VIGRA_THREADING_NAMESPACE::atomic_char;
using VIGRA_THREADING_NAMESPACE::atomic_schar;
using VIGRA_THREADING_NAMESPACE::atomic_uchar;
using VIGRA_THREADING_NAMESPACE::atomic_short;
using VIGRA_THREADING_NAMESPACE::atomic_ushort;
using VIGRA_THREADING_NAMESPACE::atomic_int;
using VIGRA_THREADING_NAMESPACE::atomic_uint;
using VIGRA_THREADING_NAMESPACE::atomic_long;
using VIGRA_THREADING_NAMESPACE::atomic_ulong;
using VIGRA_THREADING_NAMESPACE::atomic_llong;
using VIGRA_THREADING_NAMESPACE::atomic_ullong;
// using VIGRA_THREADING_NAMESPACE::atomic_char16_t;  // not in boost
// using VIGRA_THREADING_NAMESPACE::atomic_char32_t;  // not in boost
using VIGRA_THREADING_NAMESPACE::atomic_wchar_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_least8_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_least8_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_least16_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_least16_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_least32_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_least32_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_least64_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_least64_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_fast8_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_fast8_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_fast16_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_fast16_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_fast32_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_fast32_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_fast64_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_fast64_t;
using VIGRA_THREADING_NAMESPACE::atomic_intptr_t;
using VIGRA_THREADING_NAMESPACE::atomic_uintptr_t;
using VIGRA_THREADING_NAMESPACE::atomic_size_t;
using VIGRA_THREADING_NAMESPACE::atomic_ptrdiff_t;
using VIGRA_THREADING_NAMESPACE::atomic_intmax_t;
using VIGRA_THREADING_NAMESPACE::atomic_uintmax_t;

using VIGRA_THREADING_NAMESPACE::memory_order;
using VIGRA_THREADING_NAMESPACE::memory_order_relaxed;
using VIGRA_THREADING_NAMESPACE::memory_order_release;
using VIGRA_THREADING_NAMESPACE::memory_order_acquire;
using VIGRA_THREADING_NAMESPACE::memory_order_consume;
using VIGRA_THREADING_NAMESPACE::memory_order_acq_rel;
using VIGRA_THREADING_NAMESPACE::memory_order_seq_cst;

using VIGRA_THREADING_NAMESPACE::atomic_thread_fence;
using VIGRA_THREADING_NAMESPACE::atomic_signal_fence;

// using VIGRA_THREADING_NAMESPACE::atomic_is_lock_free;
// using VIGRA_THREADING_NAMESPACE::atomic_store, atomic_store_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_load, atomic_load_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_exchange, atomic_exchange_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_compare_exchange_weak;
// using VIGRA_THREADING_NAMESPACE::atomic_compare_exchange_weak_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_compare_exchange_strong;
// using VIGRA_THREADING_NAMESPACE::atomic_compare_exchange_strong_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_fetch_add, atomic_fetch_add_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_fetch_sub, atomic_fetch_sub_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_fetch_and, atomic_fetch_and_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_fetch_or, atomic_fetch_or_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_fetch_xor, atomic_fetch_xor_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_flag_test_and_set, atomic_flag_test_and_set_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_flag_clear, atomic_flag_clear_explicit;
// using VIGRA_THREADING_NAMESPACE::atomic_init;
// using VIGRA_THREADING_NAMESPACE::kill_dependency;

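/* Usage sketch for the atomic aliases (illustrative; the counter name is made
   up for this example):

       vigra::threading::atomic<int> pixels_done(0);

       // in worker threads: count finished items without a mutex
       pixels_done.fetch_add(1, vigra::threading::memory_order_relaxed);

       // in the supervising thread: read the current progress
       int done = pixels_done.load(vigra::threading::memory_order_acquire);
*/
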
#else  // VIGRA_HAS_ATOMIC not defined

enum memory_order {
    memory_order_relaxed,
    memory_order_release,
    memory_order_acquire,
    memory_order_consume,
    memory_order_acq_rel,
    memory_order_seq_cst
};

#ifdef _MSC_VER

// Fallback atomic operations for MSVC, implemented with the Interlocked*
// intrinsics and MemoryBarrier() from "windows.h".
template <int SIZE=4>
struct atomic_long_impl
{
    typedef LONG value_type;

    static long load(value_type const & val)
    {
        long res = val;
        MemoryBarrier();
        return res;
    }

    static void store(value_type & dest, long val)
    {
        MemoryBarrier();
        dest = val;
    }

    static long add(value_type & dest, long val)
    {
        return InterlockedExchangeAdd(&dest, val);
    }

    static long sub(value_type & dest, long val)
    {
        return InterlockedExchangeAdd(&dest, -val);
    }

    static bool compare_exchange(value_type & dest, long & old_val, long new_val)
    {
        long check_val = old_val;
        old_val = InterlockedCompareExchange(&dest, new_val, old_val);
        return check_val == old_val;
    }
};

template <>
struct atomic_long_impl<8>
{
    typedef LONGLONG value_type;

    static long load(value_type const & val)
    {
        long res = val;
        MemoryBarrier();
        return res;
    }

    static void store(value_type & dest, long val)
    {
        MemoryBarrier();
        dest = val;
    }

    static long add(value_type & dest, long val)
    {
        return InterlockedExchangeAdd64(&dest, val);
    }

    static long sub(value_type & dest, long val)
    {
        return InterlockedExchangeAdd64(&dest, -val);
    }

    static bool compare_exchange(value_type & dest, long & old_val, long new_val)
    {
        long check_val = old_val;
        old_val = InterlockedCompareExchange64(&dest, new_val, old_val);
        return check_val == old_val;
    }
};

#else

// Fallback atomic operations for GCC-compatible compilers, implemented with
// the __sync builtins.
template <int SIZE=4>
struct atomic_long_impl
{
    typedef long value_type;

    static long load(value_type const & val)
    {
        long res = val;
        __sync_synchronize();
        return res;
    }

    static void store(value_type & dest, long val)
    {
        __sync_synchronize();
        dest = val;
    }

    static long add(value_type & dest, long val)
    {
        return __sync_fetch_and_add(&dest, val);
    }

    static long sub(value_type & dest, long val)
    {
        return __sync_fetch_and_sub(&dest, val);
    }

    static bool compare_exchange(value_type & dest, long & old_val, long new_val)
    {
        long check_val = old_val;
        old_val = __sync_val_compare_and_swap(&dest, old_val, new_val);
        return check_val == old_val;
    }
};

#endif // _MSC_VER

// Minimal stand-in for an atomic long, providing a subset of the
// std::atomic<long> interface on top of the platform primitives above.
struct atomic_long
{
    typedef atomic_long_impl<sizeof(long)>::value_type value_type;

    atomic_long(long v = 0)
    : value_(v)
    {}

    atomic_long & operator=(long val)
    {
        store(val);
        return *this;
    }

    bool operator==(long val) const
    {
        return load() == val;
    }

    void operator++()
    {
        fetch_add(1);
    }

    void operator--()
    {
        fetch_sub(1);
    }

    long load(memory_order = memory_order_seq_cst) const
    {
        return atomic_long_impl<sizeof(long)>::load(value_);
    }

    void store(long v, memory_order = memory_order_seq_cst)
    {
        atomic_long_impl<sizeof(long)>::store(value_, v);
    }

    long fetch_add(long v, memory_order = memory_order_seq_cst)
    {
        return atomic_long_impl<sizeof(long)>::add(value_, v);
    }

    long fetch_sub(long v, memory_order = memory_order_seq_cst)
    {
        return atomic_long_impl<sizeof(long)>::sub(value_, v);
    }

    bool compare_exchange_strong(long & old_val, long new_val, memory_order = memory_order_seq_cst)
    {
        return atomic_long_impl<sizeof(long)>::compare_exchange(value_, old_val, new_val);
    }

    bool compare_exchange_weak(long & old_val, long new_val, memory_order = memory_order_seq_cst)
    {
        return atomic_long_impl<sizeof(long)>::compare_exchange(value_, old_val, new_val);
    }

    value_type value_;
};
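
/* Usage sketch for the atomic_long fallback (illustrative; the variable names
   are made up for this example). Because the member functions mirror a subset
   of the std::atomic<long> interface, code written like this also compiles
   when the real atomic types are available instead:

       vigra::threading::atomic_long items_left(100);

       --items_left;                        // atomic decrement
       long remaining = items_left.load();  // atomic read

       // classic compare-and-swap retry loop: atomically double the counter
       long expected = items_left.load();
       while(!items_left.compare_exchange_weak(expected, 2*expected))
       {
           // 'expected' now holds the value seen by the failed attempt; retry
       }
*/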

#endif // VIGRA_HAS_ATOMIC

}} // namespace vigra::threading

#undef VIGRA_THREADING_NAMESPACE

#endif // not VIGRA_SINGLE_THREADED

#endif // VIGRA_THREADING_HXX
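
For reference, a minimal program using this compatibility layer from client
code. This is a sketch only: the include path <vigra/threading.hxx> and the
thread/iteration counts are assumptions, not taken from the header itself.

    #include <vigra/threading.hxx>

    #include <iostream>
    #include <vector>

    int main()
    {
        // shared progress counter (std::atomic_long, boost::atomic_long, or
        // the fallback atomic_long, depending on the configuration above)
        vigra::threading::atomic_long counter(0);

        std::vector<vigra::threading::thread> workers;
        for(int i = 0; i < 4; ++i)
            workers.push_back(vigra::threading::thread([&counter]()
            {
                for(int k = 0; k < 1000; ++k)
                    ++counter;               // atomic increment
            }));

        for(auto & t : workers)
            t.join();                        // wait for all workers to finish

        std::cout << counter.load() << std::endl;   // prints 4000
        return 0;
    }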
