[ VIGRA Homepage | Function Index | Class Index | Namespaces | File List | Main Page ]

threading.hxx VIGRA

1 /************************************************************************/
2 /* */
3 /* Copyright 2013-2014 by Ullrich Koethe */
4 /* */
5 /* This file is part of the VIGRA computer vision library. */
6 /* The VIGRA Website is */
7 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
8 /* Please direct questions, bug reports, and contributions to */
9 /* ullrich.koethe@iwr.uni-heidelberg.de or */
10 /* vigra@informatik.uni-hamburg.de */
11 /* */
12 /* Permission is hereby granted, free of charge, to any person */
13 /* obtaining a copy of this software and associated documentation */
14 /* files (the "Software"), to deal in the Software without */
15 /* restriction, including without limitation the rights to use, */
16 /* copy, modify, merge, publish, distribute, sublicense, and/or */
17 /* sell copies of the Software, and to permit persons to whom the */
18 /* Software is furnished to do so, subject to the following */
19 /* conditions: */
20 /* */
21 /* The above copyright notice and this permission notice shall be */
22 /* included in all copies or substantial portions of the */
23 /* Software. */
24 /* */
25 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
26 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
27 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
28 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
29 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
30 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
31 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
32 /* OTHER DEALINGS IN THE SOFTWARE. */
33 /* */
34 /************************************************************************/
35 
36 #ifndef VIGRA_THREADING_HXX
37 #define VIGRA_THREADING_HXX
38 
39 /* Compatibility header to import threading-related functionality from boost
40  when the compiler doesn't yet support C++11.
41 */
42 
43  // ignore all threading if VIGRA_SINGLE_THREADED is defined
44 #ifndef VIGRA_SINGLE_THREADED
45 
46 #ifndef VIGRA_NO_STD_THREADING
47 # if defined(__clang__)
48 # if (!__has_include(<thread>) || !__has_include(<mutex>) || !__has_include(<atomic>))
49 # define VIGRA_NO_STD_THREADING
50 # endif
51 # else
52 # if defined(__GNUC__) && (!defined(_GLIBCXX_HAS_GTHREADS) || !defined(_GLIBCXX_USE_C99_STDINT_TR1) || !defined(_GLIBCXX_USE_SCHED_YIELD))
53 # define VIGRA_NO_STD_THREADING
54 # endif
55 # endif
56 
57 # if defined(_MSC_VER) && _MSC_VER <= 1600
58 # define VIGRA_NO_STD_THREADING
59 # endif
60 #endif
61 
62 #ifdef USE_BOOST_THREAD
63 # include <boost/thread.hpp>
64 # if BOOST_VERSION >= 105300
65 # include <boost/atomic.hpp>
66 # define VIGRA_HAS_ATOMIC 1
67 # endif
68 # define VIGRA_THREADING_NAMESPACE boost
69 #elif defined(VIGRA_NO_STD_THREADING)
70 # error "Your compiler does not support std::thread. If the boost libraries are available, consider running cmake with -DWITH_BOOST_THREAD=1"
71 #else
72 # include <thread>
73 # include <mutex>
74 // # include <shared_mutex> // C++14
75 # include <atomic>
76 # define VIGRA_HAS_ATOMIC 1
77 # define VIGRA_THREADING_NAMESPACE std
78 #endif
79 
80 #if defined(_MSC_VER) && !defined(VIGRA_HAS_ATOMIC)
81 # include "windows.h"
82 #endif
83 
84 namespace vigra { namespace threading {
85 
86 // contents of <thread>
87 
88 using VIGRA_THREADING_NAMESPACE::thread;
89 
90 namespace this_thread {
91 
92 using VIGRA_THREADING_NAMESPACE::this_thread::yield;
93 using VIGRA_THREADING_NAMESPACE::this_thread::get_id;
94 using VIGRA_THREADING_NAMESPACE::this_thread::sleep_for;
95 using VIGRA_THREADING_NAMESPACE::this_thread::sleep_until;
96 
97 } // namespace this_thread
98 
99 // contents of <mutex>
100 
101 using VIGRA_THREADING_NAMESPACE::mutex;
102 using VIGRA_THREADING_NAMESPACE::timed_mutex;
103 using VIGRA_THREADING_NAMESPACE::recursive_mutex;
104 using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;
105 
106 using VIGRA_THREADING_NAMESPACE::lock_guard;
107 using VIGRA_THREADING_NAMESPACE::unique_lock;
108 
109 using VIGRA_THREADING_NAMESPACE::defer_lock_t;
110 using VIGRA_THREADING_NAMESPACE::try_to_lock_t;
111 using VIGRA_THREADING_NAMESPACE::adopt_lock_t;
112 
113 using VIGRA_THREADING_NAMESPACE::defer_lock;
114 using VIGRA_THREADING_NAMESPACE::try_to_lock;
115 using VIGRA_THREADING_NAMESPACE::adopt_lock;
116 
117 using VIGRA_THREADING_NAMESPACE::try_lock;
118 using VIGRA_THREADING_NAMESPACE::lock;
119 
120 using VIGRA_THREADING_NAMESPACE::once_flag;
121 using VIGRA_THREADING_NAMESPACE::call_once;
122 
123 // contents of <shared_mutex>
124 
125 // using VIGRA_THREADING_NAMESPACE::shared_mutex; // C++14
126 // using VIGRA_THREADING_NAMESPACE::shared_lock; // C++14
127 
128 #ifdef VIGRA_HAS_ATOMIC
129 
130 // contents of <atomic>
131 
132 using VIGRA_THREADING_NAMESPACE::atomic_flag;
133 using VIGRA_THREADING_NAMESPACE::atomic;
134 
135 using VIGRA_THREADING_NAMESPACE::atomic_char;
136 using VIGRA_THREADING_NAMESPACE::atomic_schar;
137 using VIGRA_THREADING_NAMESPACE::atomic_uchar;
138 using VIGRA_THREADING_NAMESPACE::atomic_short;
139 using VIGRA_THREADING_NAMESPACE::atomic_ushort;
140 using VIGRA_THREADING_NAMESPACE::atomic_int;
141 using VIGRA_THREADING_NAMESPACE::atomic_uint;
142 using VIGRA_THREADING_NAMESPACE::atomic_long;
143 using VIGRA_THREADING_NAMESPACE::atomic_ulong;
144 using VIGRA_THREADING_NAMESPACE::atomic_llong;
145 using VIGRA_THREADING_NAMESPACE::atomic_ullong;
146 // using VIGRA_THREADING_NAMESPACE::atomic_char16_t; // not in boost
147 // using VIGRA_THREADING_NAMESPACE::atomic_char32_t; // not in boost
148 using VIGRA_THREADING_NAMESPACE::atomic_wchar_t;
149 using VIGRA_THREADING_NAMESPACE::atomic_int_least8_t;
150 using VIGRA_THREADING_NAMESPACE::atomic_uint_least8_t;
151 using VIGRA_THREADING_NAMESPACE::atomic_int_least16_t;
152 using VIGRA_THREADING_NAMESPACE::atomic_uint_least16_t;
153 using VIGRA_THREADING_NAMESPACE::atomic_int_least32_t;
154 using VIGRA_THREADING_NAMESPACE::atomic_uint_least32_t;
155 using VIGRA_THREADING_NAMESPACE::atomic_int_least64_t;
156 using VIGRA_THREADING_NAMESPACE::atomic_uint_least64_t;
157 using VIGRA_THREADING_NAMESPACE::atomic_int_fast8_t;
158 using VIGRA_THREADING_NAMESPACE::atomic_uint_fast8_t;
159 using VIGRA_THREADING_NAMESPACE::atomic_int_fast16_t;
160 using VIGRA_THREADING_NAMESPACE::atomic_uint_fast16_t;
161 using VIGRA_THREADING_NAMESPACE::atomic_int_fast32_t;
162 using VIGRA_THREADING_NAMESPACE::atomic_uint_fast32_t;
163 using VIGRA_THREADING_NAMESPACE::atomic_int_fast64_t;
164 using VIGRA_THREADING_NAMESPACE::atomic_uint_fast64_t;
165 using VIGRA_THREADING_NAMESPACE::atomic_intptr_t;
166 using VIGRA_THREADING_NAMESPACE::atomic_uintptr_t;
167 using VIGRA_THREADING_NAMESPACE::atomic_size_t;
168 using VIGRA_THREADING_NAMESPACE::atomic_ptrdiff_t;
169 using VIGRA_THREADING_NAMESPACE::atomic_intmax_t;
170 using VIGRA_THREADING_NAMESPACE::atomic_uintmax_t;
171 
172 using VIGRA_THREADING_NAMESPACE::memory_order;
173 using VIGRA_THREADING_NAMESPACE::memory_order_relaxed;
174 using VIGRA_THREADING_NAMESPACE::memory_order_release;
175 using VIGRA_THREADING_NAMESPACE::memory_order_acquire;
176 using VIGRA_THREADING_NAMESPACE::memory_order_consume;
177 using VIGRA_THREADING_NAMESPACE::memory_order_acq_rel;
178 using VIGRA_THREADING_NAMESPACE::memory_order_seq_cst;
179 
180 using VIGRA_THREADING_NAMESPACE::atomic_thread_fence;
181 using VIGRA_THREADING_NAMESPACE::atomic_signal_fence;
182 
183 // using VIGRA_THREADING_NAMESPACE::atomic_is_lock_free;
184 // using VIGRA_THREADING_NAMESPACE::atomic_storeatomic_store_explicit;
185 // using VIGRA_THREADING_NAMESPACE::atomic_loadatomic_load_explicit;
186 // using VIGRA_THREADING_NAMESPACE::atomic_exchangeatomic_exchange_explicit;
187 // using VIGRA_THREADING_NAMESPACE::atomic_compare_exchange_weak;
188 // using VIGRA_THREADING_NAMESPACE::atomic_compare_exchange_weak_explicit;
189 // using VIGRA_THREADING_NAMESPACE::atomic_compare_exchange_strong;
190 // using VIGRA_THREADING_NAMESPACE::atomic_compare_exchange_strong_explicit;
191 // using VIGRA_THREADING_NAMESPACE::atomic_fetch_addatomic_fetch_add_explicit;
192 // using VIGRA_THREADING_NAMESPACE::atomic_fetch_subatomic_fetch_sub_explicit;
193 // using VIGRA_THREADING_NAMESPACE::atomic_fetch_andatomic_fetch_and_explicit;
194 // using VIGRA_THREADING_NAMESPACE::atomic_fetch_oratomic_fetch_or_explicit;
195 // using VIGRA_THREADING_NAMESPACE::atomic_fetch_xoratomic_fetch_xor_explicit;
196 // using VIGRA_THREADING_NAMESPACE::atomic_flag_test_and_setatomic_flag_test_and_set_explicit;
197 // using VIGRA_THREADING_NAMESPACE::atomic_flag_clearatomic_flag_clear_explicit;
198 // using VIGRA_THREADING_NAMESPACE::atomic_init;
199 // using VIGRA_THREADING_NAMESPACE::kill_dependency;
200 
201 #else // VIGRA_HAS_ATOMIC not defined
202 
// Fallback replacement for std::memory_order, used when neither <atomic>
// nor boost/atomic.hpp is available (i.e. VIGRA_HAS_ATOMIC is not defined).
// The enumerators exist only so that calling code written against the
// std::atomic interface compiles unchanged; the fallback atomic_long below
// accepts and ignores the ordering argument, always synchronizing with a
// full memory barrier. Declaration order (and therefore the numeric values
// 0..5) is kept exactly as in the original header.
enum memory_order {
    memory_order_relaxed,
    memory_order_release,
    memory_order_acquire,
    memory_order_consume,
    memory_order_acq_rel,
    memory_order_seq_cst
};
211 
212 #ifdef _MSC_VER
213 
214 template <int SIZE=4>
215 struct atomic_long_impl
216 {
217  typedef LONG value_type;
218 
219  static long load(value_type const & val)
220  {
221  long res = val;
222  MemoryBarrier();
223  return res;
224  }
225 
226  static void store(value_type & dest, long val)
227  {
228  MemoryBarrier();
229  dest = val;
230  }
231 
232  static long add(value_type & dest, long val)
233  {
234  return InterlockedExchangeAdd(&dest, val);
235  }
236 
237  static long sub(value_type & dest, long val)
238  {
239  return InterlockedExchangeAdd(&dest, -val);
240  }
241 
242  static bool compare_exchange(value_type & dest, long & old_val, long new_val)
243  {
244  long check_val = old_val;
245  old_val = InterlockedCompareExchange(&dest, new_val, old_val);
246  return check_val == old_val;
247  }
248 };
249 
250 template <>
251 struct atomic_long_impl<8>
252 {
253  typedef LONGLONG value_type;
254 
255  static long load(value_type const & val)
256  {
257  long res = val;
258  MemoryBarrier();
259  return res;
260  }
261 
262  static void store(value_type & dest, long val)
263  {
264  MemoryBarrier();
265  dest = val;
266  }
267 
268  static long add(value_type & dest, long val)
269  {
270  return InterlockedExchangeAdd64(&dest, val);
271  }
272 
273  static long sub(value_type & dest, long val)
274  {
275  return InterlockedExchangeAdd64(&dest, -val);
276  }
277 
278  static bool compare_exchange(value_type & dest, long & old_val, long new_val)
279  {
280  long check_val = old_val;
281  old_val = InterlockedCompareExchange64(&dest, new_val, old_val);
282  return check_val == old_val;
283  }
284 };
285 
286 #else
287 
// Fallback for non-MSVC compilers that lack both <atomic> and
// boost/atomic.hpp, built on the GCC/Clang __sync_* builtins, which all
// carry full-barrier semantics. The single primary template serves every
// SIZE; value_type is plain `long` throughout.
template <int SIZE=4>
struct atomic_long_impl
{
    typedef long value_type;

    // Plain read followed by a full memory barrier.
    // NOTE(review): the barrier follows the read, so this is not a
    // conventional acquire load — confirm the intended ordering upstream.
    static long load(value_type const & val)
    {
        long res = val;
        __sync_synchronize();
        return res;
    }

    // Full memory barrier followed by a plain write (release-style store).
    static void store(value_type & dest, long val)
    {
        __sync_synchronize();
        dest = val;
    }

    // Atomically adds val to dest; returns the value held before the add.
    static long add(value_type & dest, long val)
    {
        return __sync_fetch_and_add(&dest, val);
    }

    // Atomically subtracts val from dest; returns the value held before
    // the subtraction.
    static long sub(value_type & dest, long val)
    {
        return __sync_fetch_and_sub(&dest, val);
    }

    // Strong CAS following the std::atomic convention: on failure,
    // old_val is updated to the value actually observed in dest.
    static bool compare_exchange(value_type & dest, long & old_val, long new_val)
    {
        long check_val = old_val;
        old_val = __sync_val_compare_and_swap(&dest, old_val, new_val);
        return check_val == old_val;
    }
};
323 
324 #endif // _MSC_VER
325 
326 struct atomic_long
327 {
328  typedef atomic_long_impl<sizeof(long)>::value_type value_type;
329 
330  atomic_long(long v = 0)
331  : value_(v)
332  {}
333 
334  atomic_long & operator=(long val)
335  {
336  store(val);
337  return *this;
338  }
339 
340  long load(memory_order = memory_order_seq_cst) const
341  {
342  return atomic_long_impl<sizeof(long)>::load(value_);
343  }
344 
345  void store(long v, memory_order = memory_order_seq_cst)
346  {
347  atomic_long_impl<sizeof(long)>::store(value_, v);
348  }
349 
350  long fetch_add(long v, memory_order = memory_order_seq_cst)
351  {
352  return atomic_long_impl<sizeof(long)>::add(value_, v);
353  }
354 
355  long fetch_sub(long v, memory_order = memory_order_seq_cst)
356  {
357  return atomic_long_impl<sizeof(long)>::sub(value_, v);
358  }
359 
360  bool compare_exchange_strong(long & old_val, long new_val, memory_order = memory_order_seq_cst)
361  {
362  return atomic_long_impl<sizeof(long)>::compare_exchange(value_, old_val, new_val);
363  }
364 
365  bool compare_exchange_weak(long & old_val, long new_val, memory_order = memory_order_seq_cst)
366  {
367  return atomic_long_impl<sizeof(long)>::compare_exchange(value_, old_val, new_val);
368  }
369 
370  value_type value_;
371 };
372 
373 #endif // VIGRA_HAS_ATOMIC
374 
375 }} // namespace vigra::threading
376 
377 #undef VIGRA_THREADING_NAMESPACE
378 
379 #endif // not VIGRA_SINGLE_THREADED
380 
381 #endif // VIGRA_THREADING_HXX
void sub(FixedPoint< IntBits1, FracBits1 > l, FixedPoint< IntBits2, FracBits2 > r, FixedPoint< IntBits3, FracBits3 > &result)
subtraction with enforced result type.
Definition: fixedpoint.hxx:583
void add(FixedPoint< IntBits1, FracBits1 > l, FixedPoint< IntBits2, FracBits2 > r, FixedPoint< IntBits3, FracBits3 > &result)
addition with enforced result type.
Definition: fixedpoint.hxx:561
Definition: accessor.hxx:43

© Ullrich Köthe (ullrich.koethe@iwr.uni-heidelberg.de)
Heidelberg Collaboratory for Image Processing, University of Heidelberg, Germany

html generated using doxygen and Python
vigra 1.10.0