parallel.py 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060
  1. """
  2. Helpers for embarrassingly parallel code.
  3. """
  4. # Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
  5. # Copyright: 2010, Gael Varoquaux
  6. # License: BSD 3 clause
  7. from __future__ import division
  8. import os
  9. import sys
  10. from math import sqrt
  11. import functools
  12. import time
  13. import threading
  14. import itertools
  15. from uuid import uuid4
  16. from numbers import Integral
  17. import warnings
  18. import queue
  19. from ._multiprocessing_helpers import mp
  20. from .logger import Logger, short_format_time
  21. from .disk import memstr_to_bytes
  22. from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
  23. ThreadingBackend, SequentialBackend,
  24. LokyBackend)
  25. from .externals.cloudpickle import dumps, loads
  26. from .externals import loky
  27. # Make sure that those two classes are part of the public joblib.parallel API
  28. # so that 3rd party backend implementers can import them from here.
  29. from ._parallel_backends import AutoBatchingMixin # noqa
  30. from ._parallel_backends import ParallelBackendBase # noqa
# Registry mapping backend names to backend factory classes. Third-party
# implementations can be added at runtime via ``register_parallel_backend``.
BACKENDS = {
    'multiprocessing': MultiprocessingBackend,
    'threading': ThreadingBackend,
    'sequential': SequentialBackend,
    'loky': LokyBackend,
}
# name of the backend used by default by Parallel outside of any context
# managed by ``parallel_backend``.
DEFAULT_BACKEND = 'loky'
# Default number of jobs used when a context manager installs a backend
# without an explicit n_jobs value.
DEFAULT_N_JOBS = 1
# Name of the thread-based backend used as a fallback when a shared memory
# constraint cannot be honored by the selected backend.
DEFAULT_THREAD_BACKEND = 'threading'
# Thread local value that can be overridden by the ``parallel_backend`` context
# manager
_backend = threading.local()
# Accepted values for the ``prefer`` soft hint and the ``require`` hard
# constraint of get_active_backend / Parallel.
VALID_BACKEND_HINTS = ('processes', 'threads', None)
VALID_BACKEND_CONSTRAINTS = ('sharedmem', None)
  47. def _register_dask():
  48. """ Register Dask Backend if called with parallel_backend("dask") """
  49. try:
  50. from ._dask import DaskDistributedBackend
  51. register_parallel_backend('dask', DaskDistributedBackend)
  52. except ImportError as e:
  53. msg = ("To use the dask.distributed backend you must install both "
  54. "the `dask` and distributed modules.\n\n"
  55. "See https://dask.pydata.org/en/latest/install.html for more "
  56. "information.")
  57. raise ImportError(msg) from e
# Backends that require a registration hook to run before first use; the
# hook imports optional third-party dependencies and registers the backend.
EXTERNAL_BACKENDS = {
    'dask': _register_dask,
}
  61. def get_active_backend(prefer=None, require=None, verbose=0):
  62. """Return the active default backend"""
  63. if prefer not in VALID_BACKEND_HINTS:
  64. raise ValueError("prefer=%r is not a valid backend hint, "
  65. "expected one of %r" % (prefer, VALID_BACKEND_HINTS))
  66. if require not in VALID_BACKEND_CONSTRAINTS:
  67. raise ValueError("require=%r is not a valid backend constraint, "
  68. "expected one of %r"
  69. % (require, VALID_BACKEND_CONSTRAINTS))
  70. if prefer == 'processes' and require == 'sharedmem':
  71. raise ValueError("prefer == 'processes' and require == 'sharedmem'"
  72. " are inconsistent settings")
  73. backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
  74. if backend_and_jobs is not None:
  75. # Try to use the backend set by the user with the context manager.
  76. backend, n_jobs = backend_and_jobs
  77. nesting_level = backend.nesting_level
  78. supports_sharedmem = getattr(backend, 'supports_sharedmem', False)
  79. if require == 'sharedmem' and not supports_sharedmem:
  80. # This backend does not match the shared memory constraint:
  81. # fallback to the default thead-based backend.
  82. sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND](
  83. nesting_level=nesting_level)
  84. if verbose >= 10:
  85. print("Using %s as joblib.Parallel backend instead of %s "
  86. "as the latter does not provide shared memory semantics."
  87. % (sharedmem_backend.__class__.__name__,
  88. backend.__class__.__name__))
  89. return sharedmem_backend, DEFAULT_N_JOBS
  90. else:
  91. return backend_and_jobs
  92. # We are outside of the scope of any parallel_backend context manager,
  93. # create the default backend instance now.
  94. backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0)
  95. supports_sharedmem = getattr(backend, 'supports_sharedmem', False)
  96. uses_threads = getattr(backend, 'uses_threads', False)
  97. if ((require == 'sharedmem' and not supports_sharedmem) or
  98. (prefer == 'threads' and not uses_threads)):
  99. # Make sure the selected default backend match the soft hints and
  100. # hard constraints:
  101. backend = BACKENDS[DEFAULT_THREAD_BACKEND](nesting_level=0)
  102. return backend, DEFAULT_N_JOBS
  103. class parallel_backend(object):
  104. """Change the default backend used by Parallel inside a with block.
  105. If ``backend`` is a string it must match a previously registered
  106. implementation using the ``register_parallel_backend`` function.
  107. By default the following backends are available:
  108. - 'loky': single-host, process-based parallelism (used by default),
  109. - 'threading': single-host, thread-based parallelism,
  110. - 'multiprocessing': legacy single-host, process-based parallelism.
  111. 'loky' is recommended to run functions that manipulate Python objects.
  112. 'threading' is a low-overhead alternative that is most efficient for
  113. functions that release the Global Interpreter Lock: e.g. I/O-bound code or
  114. CPU-bound code in a few calls to native code that explicitly releases the
  115. GIL.
  116. In addition, if the `dask` and `distributed` Python packages are installed,
  117. it is possible to use the 'dask' backend for better scheduling of nested
  118. parallel calls without over-subscription and potentially distribute
  119. parallel calls over a networked cluster of several hosts.
  120. Alternatively the backend can be passed directly as an instance.
  121. By default all available workers will be used (``n_jobs=-1``) unless the
  122. caller passes an explicit value for the ``n_jobs`` parameter.
  123. This is an alternative to passing a ``backend='backend_name'`` argument to
  124. the ``Parallel`` class constructor. It is particularly useful when calling
  125. into library code that uses joblib internally but does not expose the
  126. backend argument in its own API.
  127. >>> from operator import neg
  128. >>> with parallel_backend('threading'):
  129. ... print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
  130. ...
  131. [-1, -2, -3, -4, -5]
  132. Warning: this function is experimental and subject to change in a future
  133. version of joblib.
  134. Joblib also tries to limit the oversubscription by limiting the number of
  135. threads usable in some third-party library threadpools like OpenBLAS, MKL
  136. or OpenMP. The default limit in each worker is set to
  137. ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be
  138. overwritten with the ``inner_max_num_threads`` argument which will be used
  139. to set this limit in the child processes.
  140. .. versionadded:: 0.10
  141. """
  142. def __init__(self, backend, n_jobs=-1, inner_max_num_threads=None,
  143. **backend_params):
  144. if isinstance(backend, str):
  145. if backend not in BACKENDS and backend in EXTERNAL_BACKENDS:
  146. register = EXTERNAL_BACKENDS[backend]
  147. register()
  148. backend = BACKENDS[backend](**backend_params)
  149. if inner_max_num_threads is not None:
  150. msg = ("{} does not accept setting the inner_max_num_threads "
  151. "argument.".format(backend.__class__.__name__))
  152. assert backend.supports_inner_max_num_threads, msg
  153. backend.inner_max_num_threads = inner_max_num_threads
  154. # If the nesting_level of the backend is not set previously, use the
  155. # nesting level from the previous active_backend to set it
  156. current_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
  157. if backend.nesting_level is None:
  158. if current_backend_and_jobs is None:
  159. nesting_level = 0
  160. else:
  161. nesting_level = current_backend_and_jobs[0].nesting_level
  162. backend.nesting_level = nesting_level
  163. # Save the backends info and set the active backend
  164. self.old_backend_and_jobs = current_backend_and_jobs
  165. self.new_backend_and_jobs = (backend, n_jobs)
  166. _backend.backend_and_jobs = (backend, n_jobs)
  167. def __enter__(self):
  168. return self.new_backend_and_jobs
  169. def __exit__(self, type, value, traceback):
  170. self.unregister()
  171. def unregister(self):
  172. if self.old_backend_and_jobs is None:
  173. if getattr(_backend, 'backend_and_jobs', None) is not None:
  174. del _backend.backend_and_jobs
  175. else:
  176. _backend.backend_and_jobs = self.old_backend_and_jobs
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
DEFAULT_MP_CONTEXT = None
# ``mp`` is None when multiprocessing was disabled via the
# JOBLIB_MULTIPROCESSING environment variable; ``hasattr`` then returns False.
if hasattr(mp, 'get_context'):
    method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
    if method is not None:
        DEFAULT_MP_CONTEXT = mp.get_context(method=method)
  187. class BatchedCalls(object):
  188. """Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
  189. def __init__(self, iterator_slice, backend_and_jobs, reducer_callback=None,
  190. pickle_cache=None):
  191. self.items = list(iterator_slice)
  192. self._size = len(self.items)
  193. self._reducer_callback = reducer_callback
  194. if isinstance(backend_and_jobs, tuple):
  195. self._backend, self._n_jobs = backend_and_jobs
  196. else:
  197. # this is for backward compatibility purposes. Before 0.12.6,
  198. # nested backends were returned without n_jobs indications.
  199. self._backend, self._n_jobs = backend_and_jobs, None
  200. self._pickle_cache = pickle_cache if pickle_cache is not None else {}
  201. def __call__(self):
  202. # Set the default nested backend to self._backend but do not set the
  203. # change the default number of processes to -1
  204. with parallel_backend(self._backend, n_jobs=self._n_jobs):
  205. return [func(*args, **kwargs)
  206. for func, args, kwargs in self.items]
  207. def __reduce__(self):
  208. if self._reducer_callback is not None:
  209. self._reducer_callback()
  210. # no need pickle the callback.
  211. return (
  212. BatchedCalls,
  213. (self.items, (self._backend, self._n_jobs), None,
  214. self._pickle_cache)
  215. )
  216. def __len__(self):
  217. return self._size
  218. ###############################################################################
  219. # CPU count that works also when multiprocessing has been disabled via
  220. # the JOBLIB_MULTIPROCESSING environment variable
  221. def cpu_count():
  222. """Return the number of CPUs."""
  223. if mp is None:
  224. return 1
  225. return loky.cpu_count()
  226. ###############################################################################
  227. # For verbosity
  228. def _verbosity_filter(index, verbose):
  229. """ Returns False for indices increasingly apart, the distance
  230. depending on the value of verbose.
  231. We use a lag increasing as the square of index
  232. """
  233. if not verbose:
  234. return True
  235. elif verbose > 10:
  236. return False
  237. if index == 0:
  238. return False
  239. verbose = .5 * (11 - verbose) ** 2
  240. scale = sqrt(index / verbose)
  241. next_scale = sqrt((index + 1) / verbose)
  242. return (int(next_scale) == int(scale))
  243. ###############################################################################
  244. def delayed(function, check_pickle=None):
  245. """Decorator used to capture the arguments of a function."""
  246. if check_pickle is not None:
  247. warnings.warn('check_pickle is deprecated in joblib 0.12 and will be'
  248. ' removed in 0.13', DeprecationWarning)
  249. # Try to pickle the input function, to catch the problems early when
  250. # using with multiprocessing:
  251. if check_pickle:
  252. dumps(function)
  253. def delayed_function(*args, **kwargs):
  254. return function, args, kwargs
  255. try:
  256. delayed_function = functools.wraps(function)(delayed_function)
  257. except AttributeError:
  258. " functools.wraps fails on some callable objects "
  259. return delayed_function
  260. ###############################################################################
  261. class BatchCompletionCallBack(object):
  262. """Callback used by joblib.Parallel's multiprocessing backend.
  263. This callable is executed by the parent process whenever a worker process
  264. has returned the results of a batch of tasks.
  265. It is used for progress reporting, to update estimate of the batch
  266. processing duration and to schedule the next batch of tasks to be
  267. processed.
  268. """
  269. def __init__(self, dispatch_timestamp, batch_size, parallel):
  270. self.dispatch_timestamp = dispatch_timestamp
  271. self.batch_size = batch_size
  272. self.parallel = parallel
  273. def __call__(self, out):
  274. self.parallel.n_completed_tasks += self.batch_size
  275. this_batch_duration = time.time() - self.dispatch_timestamp
  276. self.parallel._backend.batch_completed(self.batch_size,
  277. this_batch_duration)
  278. self.parallel.print_progress()
  279. with self.parallel._lock:
  280. if self.parallel._original_iterator is not None:
  281. self.parallel.dispatch_next()
  282. ###############################################################################
  283. def register_parallel_backend(name, factory, make_default=False):
  284. """Register a new Parallel backend factory.
  285. The new backend can then be selected by passing its name as the backend
  286. argument to the Parallel class. Moreover, the default backend can be
  287. overwritten globally by setting make_default=True.
  288. The factory can be any callable that takes no argument and return an
  289. instance of ``ParallelBackendBase``.
  290. Warning: this function is experimental and subject to change in a future
  291. version of joblib.
  292. .. versionadded:: 0.10
  293. """
  294. BACKENDS[name] = factory
  295. if make_default:
  296. global DEFAULT_BACKEND
  297. DEFAULT_BACKEND = name
  298. def effective_n_jobs(n_jobs=-1):
  299. """Determine the number of jobs that can actually run in parallel
  300. n_jobs is the number of workers requested by the callers. Passing n_jobs=-1
  301. means requesting all available workers for instance matching the number of
  302. CPU cores on the worker host(s).
  303. This method should return a guesstimate of the number of workers that can
  304. actually perform work concurrently with the currently enabled default
  305. backend. The primary use case is to make it possible for the caller to know
  306. in how many chunks to slice the work.
  307. In general working on larger data chunks is more efficient (less scheduling
  308. overhead and better use of CPU cache prefetching heuristics) as long as all
  309. the workers have enough work to do.
  310. Warning: this function is experimental and subject to change in a future
  311. version of joblib.
  312. .. versionadded:: 0.10
  313. """
  314. backend, backend_n_jobs = get_active_backend()
  315. if n_jobs is None:
  316. n_jobs = backend_n_jobs
  317. return backend.effective_n_jobs(n_jobs=n_jobs)
  318. ###############################################################################
  319. class Parallel(Logger):
  320. ''' Helper class for readable parallel mapping.
  321. Read more in the :ref:`User Guide <parallel>`.
  322. Parameters
  323. -----------
  324. n_jobs: int, default: None
  325. The maximum number of concurrently running jobs, such as the number
  326. of Python worker processes when backend="multiprocessing"
  327. or the size of the thread-pool when backend="threading".
  328. If -1 all CPUs are used. If 1 is given, no parallel computing code
  329. is used at all, which is useful for debugging. For n_jobs below -1,
  330. (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
  331. CPUs but one are used.
  332. None is a marker for 'unset' that will be interpreted as n_jobs=1
  333. (sequential execution) unless the call is performed under a
  334. parallel_backend context manager that sets another value for
  335. n_jobs.
  336. backend: str, ParallelBackendBase instance or None, default: 'loky'
  337. Specify the parallelization backend implementation.
  338. Supported backends are:
  339. - "loky" used by default, can induce some
  340. communication and memory overhead when exchanging input and
  341. output data with the worker Python processes.
  342. - "multiprocessing" previous process-based backend based on
  343. `multiprocessing.Pool`. Less robust than `loky`.
  344. - "threading" is a very low-overhead backend but it suffers
  345. from the Python Global Interpreter Lock if the called function
  346. relies a lot on Python objects. "threading" is mostly useful
  347. when the execution bottleneck is a compiled extension that
  348. explicitly releases the GIL (for instance a Cython loop wrapped
  349. in a "with nogil" block or an expensive call to a library such
  350. as NumPy).
  351. - finally, you can register backends by calling
  352. register_parallel_backend. This will allow you to implement
  353. a backend of your liking.
  354. It is not recommended to hard-code the backend name in a call to
  355. Parallel in a library. Instead it is recommended to set soft hints
  356. (prefer) or hard constraints (require) so as to make it possible
  357. for library users to change the backend from the outside using the
  358. parallel_backend context manager.
  359. prefer: str in {'processes', 'threads'} or None, default: None
  360. Soft hint to choose the default backend if no specific backend
  361. was selected with the parallel_backend context manager. The
  362. default process-based backend is 'loky' and the default
  363. thread-based backend is 'threading'. Ignored if the ``backend``
  364. parameter is specified.
  365. require: 'sharedmem' or None, default None
  366. Hard constraint to select the backend. If set to 'sharedmem',
  367. the selected backend will be single-host and thread-based even
  368. if the user asked for a non-thread based backend with
  369. parallel_backend.
  370. verbose: int, optional
  371. The verbosity level: if non zero, progress messages are
  372. printed. Above 50, the output is sent to stdout.
  373. The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported.
  375. timeout: float, optional
  376. Timeout limit for each task to complete. If any task takes longer
  377. a TimeOutError will be raised. Only applied when n_jobs != 1
  378. pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
  379. The number of batches (of tasks) to be pre-dispatched.
  380. Default is '2*n_jobs'. When batch_size="auto" this is reasonable
  381. default and the workers should never starve.
  382. batch_size: int or 'auto', default: 'auto'
  383. The number of atomic tasks to dispatch at once to each
  384. worker. When individual evaluations are very fast, dispatching
  385. calls to workers can be slower than sequential computation because
  386. of the overhead. Batching fast computations together can mitigate
  387. this.
  388. The ``'auto'`` strategy keeps track of the time it takes for a batch
  389. to complete, and dynamically adjusts the batch size to keep the time
  390. on the order of half a second, using a heuristic. The initial batch
  391. size is 1.
  392. ``batch_size="auto"`` with ``backend="threading"`` will dispatch
  393. batches of a single task at a time as the threading backend has
  394. very little overhead and using larger batch size has not proved to
  395. bring any gain in that case.
  396. temp_folder: str, optional
  397. Folder to be used by the pool for memmapping large arrays
  398. for sharing memory with worker processes. If None, this will try in
  399. order:
  400. - a folder pointed by the JOBLIB_TEMP_FOLDER environment
  401. variable,
  402. - /dev/shm if the folder exists and is writable: this is a
  403. RAM disk filesystem available by default on modern Linux
  404. distributions,
  405. - the default system temporary folder that can be
  406. overridden with TMP, TMPDIR or TEMP environment
  407. variables, typically /tmp under Unix operating systems.
  408. Only active when backend="loky" or "multiprocessing".
max_nbytes: int, str, or None, optional, 1M by default
  410. Threshold on the size of arrays passed to the workers that
  411. triggers automated memory mapping in temp_folder. Can be an int
  412. in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
  413. Use None to disable memmapping of large arrays.
  414. Only active when backend="loky" or "multiprocessing".
  415. mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
  416. Memmapping mode for numpy arrays passed to workers.
  417. See 'max_nbytes' parameter documentation for more details.
  418. Notes
  419. -----
  420. This object uses workers to compute in parallel the application of a
  421. function to many different arguments. The main functionality it brings
  422. in addition to using the raw multiprocessing or concurrent.futures API
  423. are (see examples for details):
  424. * More readable code, in particular since it avoids
  425. constructing list of arguments.
  426. * Easier debugging:
  427. - informative tracebacks even when the error happens on
  428. the client side
  429. - using 'n_jobs=1' enables to turn off parallel computing
  430. for debugging without changing the codepath
  431. - early capture of pickling errors
  432. * An optional progress meter.
  433. * Interruption of multiprocesses jobs with 'Ctrl-C'
  434. * Flexible pickling control for the communication to and from
  435. the worker processes.
  436. * Ability to use shared memory efficiently with worker
  437. processes for large numpy-based datastructures.
  438. Examples
  439. --------
  440. A simple example:
  441. >>> from math import sqrt
  442. >>> from joblib import Parallel, delayed
  443. >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
  444. [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
  445. Reshaping the output when the function has several return
  446. values:
  447. >>> from math import modf
  448. >>> from joblib import Parallel, delayed
  449. >>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
  450. >>> res, i = zip(*r)
  451. >>> res
  452. (0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
  453. >>> i
  454. (0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
  455. The progress meter: the higher the value of `verbose`, the more
  456. messages:
  457. >>> from time import sleep
  458. >>> from joblib import Parallel, delayed
  459. >>> r = Parallel(n_jobs=2, verbose=10)(delayed(sleep)(.2) for _ in range(10)) #doctest: +SKIP
  460. [Parallel(n_jobs=2)]: Done 1 tasks | elapsed: 0.6s
  461. [Parallel(n_jobs=2)]: Done 4 tasks | elapsed: 0.8s
  462. [Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 1.4s finished
  463. Traceback example, note how the line of the error is indicated
  464. as well as the values of the parameter passed to the function that
  465. triggered the exception, even though the traceback happens in the
  466. child process:
  467. >>> from heapq import nlargest
  468. >>> from joblib import Parallel, delayed
  469. >>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
  470. #...
  471. ---------------------------------------------------------------------------
  472. Sub-process traceback:
  473. ---------------------------------------------------------------------------
  474. TypeError Mon Nov 12 11:37:46 2012
  475. PID: 12934 Python 2.7.3: /usr/bin/python
  476. ...........................................................................
  477. /usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
  478. 419 if n >= size:
  479. 420 return sorted(iterable, key=key, reverse=True)[:n]
  480. 421
  481. 422 # When key is none, use simpler decoration
  482. 423 if key is None:
  483. --> 424 it = izip(iterable, count(0,-1)) # decorate
  484. 425 result = _nlargest(n, it)
  485. 426 return map(itemgetter(0), result) # undecorate
  486. 427
  487. 428 # General case, slowest method
  488. TypeError: izip argument #1 must support iteration
  489. ___________________________________________________________________________
  490. Using pre_dispatch in a producer/consumer situation, where the
  491. data is generated on the fly. Note how the producer is first
  492. called 3 times before the parallel loop is initiated, and then
  493. called to generate new data on the fly:
  494. >>> from math import sqrt
  495. >>> from joblib import Parallel, delayed
  496. >>> def producer():
  497. ... for i in range(6):
  498. ... print('Produced %s' % i)
  499. ... yield i
  500. >>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
  501. ... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
  502. Produced 0
  503. Produced 1
  504. Produced 2
  505. [Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
  506. Produced 3
  507. [Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
  508. Produced 4
  509. [Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
  510. Produced 5
  511. [Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
  512. [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s remaining: 0.0s
  513. [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
  514. '''
    def __init__(self, n_jobs=None, backend=None, verbose=0, timeout=None,
                 pre_dispatch='2 * n_jobs', batch_size='auto',
                 temp_folder=None, max_nbytes='1M', mmap_mode='r',
                 prefer=None, require=None):
        # All parameters are documented in the class docstring.
        active_backend, context_n_jobs = get_active_backend(
            prefer=prefer, require=require, verbose=verbose)
        nesting_level = active_backend.nesting_level
        if backend is None and n_jobs is None:
            # If we are under a parallel_backend context manager, look up
            # the default number of jobs and use that instead:
            n_jobs = context_n_jobs
        if n_jobs is None:
            # No specific context override and no specific value request:
            # default to 1.
            n_jobs = 1
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.timeout = timeout
        self.pre_dispatch = pre_dispatch
        # Queue of pre-built batches awaiting dispatch.
        self._ready_batches = queue.Queue()
        # Unique identifier for this Parallel instance (presumably used to
        # tag per-call resources elsewhere — usage not visible in this chunk).
        self._id = uuid4().hex
        self._reducer_callback = None
        if isinstance(max_nbytes, str):
            # Convert human-readable sizes such as '1M' into a byte count.
            max_nbytes = memstr_to_bytes(max_nbytes)
        self._backend_args = dict(
            max_nbytes=max_nbytes,
            mmap_mode=mmap_mode,
            temp_folder=temp_folder,
            prefer=prefer,
            require=require,
            # Only forward verbosity above 50 to the backend.
            verbose=max(0, self.verbose - 50),
        )
        if DEFAULT_MP_CONTEXT is not None:
            # Honor the JOBLIB_START_METHOD environment variable.
            self._backend_args['context'] = DEFAULT_MP_CONTEXT
        elif hasattr(mp, "get_context"):
            self._backend_args['context'] = mp.get_context()
        if backend is None:
            backend = active_backend
        elif isinstance(backend, ParallelBackendBase):
            # Use provided backend as is, with the current nesting_level if it
            # is not set yet.
            if backend.nesting_level is None:
                backend.nesting_level = nesting_level
        elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
            # Make it possible to pass a custom multiprocessing context as
            # backend to change the start method to forkserver or spawn or
            # preload modules on the forkserver helper process.
            self._backend_args['context'] = backend
            backend = MultiprocessingBackend(nesting_level=nesting_level)
        else:
            # Backend given by name: look it up in the registry.
            try:
                backend_factory = BACKENDS[backend]
            except KeyError as e:
                raise ValueError("Invalid backend: %s, expected one of %r"
                                 % (backend, sorted(BACKENDS.keys()))) from e
            backend = backend_factory(nesting_level=nesting_level)
        if (require == 'sharedmem' and
                not getattr(backend, 'supports_sharedmem', False)):
            raise ValueError("Backend %s does not support shared memory"
                             % backend)
        # Note: ``and`` binds tighter than ``or``, so this accepts either the
        # string 'auto' or any strictly positive integer.
        if (batch_size == 'auto' or isinstance(batch_size, Integral) and
                batch_size > 0):
            self.batch_size = batch_size
        else:
            raise ValueError(
                "batch_size must be 'auto' or a positive integer, got: %r"
                % batch_size)
        self._backend = backend
        self._output = None
        self._jobs = list()
        # True while the backend lifecycle is owned by a with-block.
        self._managed_backend = False
        # This lock is used to coordinate the main thread of this process
        # with the async callback thread of the pool.
        self._lock = threading.RLock()
  589. def __enter__(self):
  590. self._managed_backend = True
  591. self._initialize_backend()
  592. return self
  593. def __exit__(self, exc_type, exc_value, traceback):
  594. self._terminate_backend()
  595. self._managed_backend = False
  596. def _initialize_backend(self):
  597. """Build a process or thread pool and return the number of workers"""
  598. try:
  599. n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
  600. **self._backend_args)
  601. if self.timeout is not None and not self._backend.supports_timeout:
  602. warnings.warn(
  603. 'The backend class {!r} does not support timeout. '
  604. "You have set 'timeout={}' in Parallel but "
  605. "the 'timeout' parameter will not be used.".format(
  606. self._backend.__class__.__name__,
  607. self.timeout))
  608. except FallbackToBackend as e:
  609. # Recursively initialize the backend in case of requested fallback.
  610. self._backend = e.backend
  611. n_jobs = self._initialize_backend()
  612. return n_jobs
  613. def _effective_n_jobs(self):
  614. if self._backend:
  615. return self._backend.effective_n_jobs(self.n_jobs)
  616. return 1
  617. def _terminate_backend(self):
  618. if self._backend is not None:
  619. self._backend.terminate()
def _dispatch(self, batch):
    """Queue the batch for computing, with or without multiprocessing

    WARNING: this method is not thread-safe: it should be only called
    indirectly via dispatch_one_batch.
    """
    # If job.get() catches an exception, it closes the queue:
    if self._aborting:
        return

    # Bookkeeping used by print_progress and by the completion callback.
    self.n_dispatched_tasks += len(batch)
    self.n_dispatched_batches += 1

    dispatch_timestamp = time.time()
    cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
    with self._lock:
        # Record the slot index before submitting: apply_async may run
        # the completion callback (on another thread) before we return.
        job_idx = len(self._jobs)
        job = self._backend.apply_async(batch, callback=cb)
        # A job can complete so quickly that its callback is
        # called before we get here, causing self._jobs to
        # grow. To ensure correct results ordering, .insert is
        # used (rather than .append) in the following line.
        self._jobs.insert(job_idx, job)
  640. def dispatch_next(self):
  641. """Dispatch more data for parallel processing
  642. This method is meant to be called concurrently by the multiprocessing
  643. callback. We rely on the thread-safety of dispatch_one_batch to protect
  644. against concurrent consumption of the unprotected iterator.
  645. """
  646. if not self.dispatch_one_batch(self._original_iterator):
  647. self._iterating = False
  648. self._original_iterator = None
def dispatch_one_batch(self, iterator):
    """Prefetch the tasks for the next batch and dispatch them.

    The effective size of the batch is computed here.
    If there are no more jobs to dispatch, return False, else return True.

    The iterator consumption and dispatching is protected by the same
    lock so calling this function should be thread safe.
    """
    if self.batch_size == 'auto':
        batch_size = self._backend.compute_batch_size()
    else:
        # Fixed batch size strategy
        batch_size = self.batch_size

    with self._lock:
        # to ensure an even distribution of the workload between workers,
        # we look ahead in the original iterators more than batch_size
        # tasks - However, we keep consuming only one batch at each
        # dispatch_one_batch call. The extra tasks are stored in a local
        # queue, _ready_batches, that is looked-up prior to re-consuming
        # tasks from the original iterator.
        try:
            tasks = self._ready_batches.get(block=False)
        except queue.Empty:
            # slice the iterator n_jobs * batchsize items at a time. If the
            # slice returns less than that, then the current batchsize puts
            # too much weight on a subset of workers, while other may end
            # up starving. So in this case, re-scale the batch size
            # accordingly to distribute evenly the last items between all
            # workers.
            n_jobs = self._cached_effective_n_jobs
            big_batch_size = batch_size * n_jobs

            islice = list(itertools.islice(iterator, big_batch_size))
            if len(islice) == 0:
                return False
            elif (iterator is self._original_iterator
                  and len(islice) < big_batch_size):
                # We reached the end of the original iterator (unless
                # iterator is the ``pre_dispatch``-long initial slice of
                # the original iterator) -- decrease the batch size to
                # account for potential variance in the batches running
                # time.
                final_batch_size = max(1, len(islice) // (10 * n_jobs))
            else:
                final_batch_size = max(1, len(islice) // n_jobs)

            # enqueue n_jobs batches in a local queue
            for i in range(0, len(islice), final_batch_size):
                tasks = BatchedCalls(islice[i:i + final_batch_size],
                                     self._backend.get_nested_backend(),
                                     self._reducer_callback,
                                     self._pickle_cache)
                self._ready_batches.put(tasks)

        # finally, get one task.
        tasks = self._ready_batches.get(block=False)
    if len(tasks) == 0:
        # No more tasks available in the iterator: tell caller to stop.
        return False
    else:
        self._dispatch(tasks)
        return True
  707. def _print(self, msg, msg_args):
  708. """Display the message on stout or stderr depending on verbosity"""
  709. # XXX: Not using the logger framework: need to
  710. # learn to use logger better.
  711. if not self.verbose:
  712. return
  713. if self.verbose < 50:
  714. writer = sys.stderr.write
  715. else:
  716. writer = sys.stdout.write
  717. msg = msg % msg_args
  718. writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
    """Display the progress of the parallel execution only a fraction
    of time, controlled by self.verbose.
    """
    if not self.verbose:
        return
    elapsed_time = time.time() - self._start_time

    # Original job iterator becomes None once it has been fully
    # consumed: at this point we know the total number of jobs and we are
    # able to display an estimation of the remaining time based on already
    # completed jobs. Otherwise, we simply display the number of completed
    # tasks.
    if self._original_iterator is not None:
        # Still dispatching: throttle output with the verbosity filter.
        if _verbosity_filter(self.n_dispatched_batches, self.verbose):
            return
        self._print('Done %3i tasks      | elapsed: %s',
                    (self.n_completed_tasks,
                     short_format_time(elapsed_time), ))
    else:
        index = self.n_completed_tasks
        # We are finished dispatching
        total_tasks = self.n_dispatched_tasks
        # We always display the first loop
        if not index == 0:
            # Display depending on the number of remaining items
            # A message as soon as we finish dispatching, cursor is 0
            cursor = (total_tasks - index + 1 -
                      self._pre_dispatch_amount)
            # Higher verbosity => smaller frequency => more messages.
            frequency = (total_tasks // self.verbose) + 1
            is_last_item = (index + 1 == total_tasks)
            # Skip this update unless it falls on a reporting boundary
            # (the very last item is reported by __call__ instead).
            if (is_last_item or cursor % frequency):
                return
        # Linear extrapolation of the remaining time from the average
        # time per completed task so far.
        remaining_time = (elapsed_time / index) * \
            (self.n_dispatched_tasks - index * 1.0)
        # only display status if remaining time is greater or equal to 0
        self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
                    (index,
                     total_tasks,
                     short_format_time(elapsed_time),
                     short_format_time(remaining_time),
                     ))
def retrieve(self):
    """Collect the results of all dispatched jobs into ``self._output``.

    Loops until dispatching is over (``self._iterating`` is False) and
    the job list is drained.  Any exception raised while fetching a
    result (``BaseException``, so KeyboardInterrupt included) aborts
    further dispatching, asks the backend to cancel outstanding work
    when it supports it, and is re-raised to the caller.
    """
    self._output = list()
    while self._iterating or len(self._jobs) > 0:
        if len(self._jobs) == 0:
            # Wait for an async callback to dispatch new jobs
            time.sleep(0.01)
            continue

        # We need to be careful: the job list can be filling up as
        # we empty it and Python list are not thread-safe by default hence
        # the use of the lock
        with self._lock:
            job = self._jobs.pop(0)

        try:
            if getattr(self._backend, 'supports_timeout', False):
                self._output.extend(job.get(timeout=self.timeout))
            else:
                self._output.extend(job.get())

        except BaseException as exception:
            # Note: we catch any BaseException instead of just Exception
            # instances to also include KeyboardInterrupt.

            # Stop dispatching any new job in the async callback thread
            self._aborting = True

            # If the backend allows it, cancel or kill remaining running
            # tasks without waiting for the results as we will raise
            # the exception we got back to the caller instead of returning
            # any result.
            backend = self._backend
            if (backend is not None and
                    hasattr(backend, 'abort_everything')):
                # If the backend is managed externally we need to make sure
                # to leave it in a working state to allow for future jobs
                # scheduling.
                ensure_ready = self._managed_backend
                backend.abort_everything(ensure_ready=ensure_ready)
            raise
def __call__(self, iterable):
    """Dispatch the tasks from ``iterable`` and return the list of results.

    Initializes the backend when not externally managed, pre-dispatches
    an initial slice of tasks, then blocks in ``retrieve`` until all
    results are collected.  Raises ValueError if this instance is
    already running and RuntimeError if the backend has no worker.
    """
    if self._jobs:
        raise ValueError('This Parallel instance is already running')
    # A flag used to abort the dispatching of jobs in case an
    # exception is found
    self._aborting = False

    if not self._managed_backend:
        n_jobs = self._initialize_backend()
    else:
        n_jobs = self._effective_n_jobs()

    if isinstance(self._backend, LokyBackend):
        # For the loky backend, we add a callback executed when reducing
        # BatchCalls, that makes the loky executor use a temporary folder
        # specific to this Parallel object when pickling temporary memmaps.
        # This callback is necessary to ensure that several Parallel
        # objects using the same reusable executor don't use the same
        # temporary resources.

        def _batched_calls_reducer_callback():
            # Relevant implementation detail: the following lines, called
            # when reducing BatchedCalls, are called in a thread-safe
            # situation, meaning that the context of the temporary folder
            # manager will not be changed in between the callback execution
            # and the end of the BatchedCalls pickling. The reason is that
            # pickling (the only place where set_current_context is used)
            # is done from a single thread (the queue_feeder_thread).
            self._backend._workers._temp_folder_manager.set_current_context(  # noqa
                self._id
            )
        self._reducer_callback = _batched_calls_reducer_callback

    # self._effective_n_jobs should be called in the Parallel.__call__
    # thread only -- store its value in an attribute for further queries.
    self._cached_effective_n_jobs = n_jobs

    backend_name = self._backend.__class__.__name__
    if n_jobs == 0:
        raise RuntimeError("%s has no active worker." % backend_name)

    self._print("Using backend %s with %d concurrent workers.",
                (backend_name, n_jobs))
    if hasattr(self._backend, 'start_call'):
        self._backend.start_call()
    iterator = iter(iterable)
    pre_dispatch = self.pre_dispatch

    if pre_dispatch == 'all' or n_jobs == 1:
        # prevent further dispatch via multiprocessing callback thread
        self._original_iterator = None
        self._pre_dispatch_amount = 0
    else:
        self._original_iterator = iterator
        if hasattr(pre_dispatch, 'endswith'):
            # pre_dispatch is a string expression (e.g. '2 * n_jobs'),
            # evaluated with n_jobs in scope.
            # NOTE(review): eval on a caller-supplied string -- assumed
            # to come from trusted configuration, never untrusted input.
            pre_dispatch = eval(pre_dispatch)
        self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)

        # The main thread will consume the first pre_dispatch items and
        # the remaining items will later be lazily dispatched by async
        # callbacks upon task completions.

        # TODO: this iterator should be batch_size * n_jobs
        iterator = itertools.islice(iterator, self._pre_dispatch_amount)

    self._start_time = time.time()
    self.n_dispatched_batches = 0
    self.n_dispatched_tasks = 0
    self.n_completed_tasks = 0

    # Use a caching dict for callables that are pickled with cloudpickle to
    # improve performances. This cache is used only in the case of
    # functions that are defined in the __main__ module, functions that are
    # defined locally (inside another function) and lambda expressions.
    self._pickle_cache = dict()
    try:
        # Only set self._iterating to True if at least a batch
        # was dispatched. In particular this covers the edge
        # case of Parallel used with an exhausted iterator. If
        # self._original_iterator is None, then this means either
        # that pre_dispatch == "all", n_jobs == 1 or that the first batch
        # was very quick and its callback already dispatched all the
        # remaining jobs.
        self._iterating = False
        if self.dispatch_one_batch(iterator):
            self._iterating = self._original_iterator is not None

        while self.dispatch_one_batch(iterator):
            pass

        if pre_dispatch == "all" or n_jobs == 1:
            # The iterable was consumed all at once by the above for loop.
            # No need to wait for async callbacks to trigger to
            # consumption.
            self._iterating = False

        with self._backend.retrieval_context():
            self.retrieve()
        # Make sure that we get a last message telling us we are done
        elapsed_time = time.time() - self._start_time
        self._print('Done %3i out of %3i | elapsed: %s finished',
                    (len(self._output), len(self._output),
                     short_format_time(elapsed_time)))
    finally:
        if hasattr(self._backend, 'stop_call'):
            self._backend.stop_call()
        if not self._managed_backend:
            self._terminate_backend()
        self._jobs = list()
        self._pickle_cache = None
    output = self._output
    self._output = None
    return output
  894. def __repr__(self):
  895. return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)