# numpy_pickle_compat.py -- joblib legacy (<= 0.9.3) pickle compatibility layer.
  1. """Numpy pickle compatibility functions."""
  2. import pickle
  3. import os
  4. import zlib
  5. import inspect
  6. from io import BytesIO
  7. from .numpy_pickle_utils import _ZFILE_PREFIX
  8. from .numpy_pickle_utils import Unpickler
  9. def hex_str(an_int):
  10. """Convert an int to an hexadecimal string."""
  11. return '{:#x}'.format(an_int)
  12. def asbytes(s):
  13. if isinstance(s, bytes):
  14. return s
  15. return s.encode('latin1')
# Fixed width of the hexadecimal length field in the z-file header:
# wide enough to hold hex_str(2 ** 64) ('0x' + 17 hex digits = 19 chars).
_MAX_LEN = len(hex_str(2 ** 64))
# Chunk granularity (64 KiB); presumably used for streamed reads/writes
# elsewhere in the module -- not referenced in the visible code.
_CHUNK_SIZE = 64 * 1024
  18. def read_zfile(file_handle):
  19. """Read the z-file and return the content as a string.
  20. Z-files are raw data compressed with zlib used internally by joblib
  21. for persistence. Backward compatibility is not guaranteed. Do not
  22. use for external purposes.
  23. """
  24. file_handle.seek(0)
  25. header_length = len(_ZFILE_PREFIX) + _MAX_LEN
  26. length = file_handle.read(header_length)
  27. length = length[len(_ZFILE_PREFIX):]
  28. length = int(length, 16)
  29. # With python2 and joblib version <= 0.8.4 compressed pickle header is one
  30. # character wider so we need to ignore an additional space if present.
  31. # Note: the first byte of the zlib data is guaranteed not to be a
  32. # space according to
  33. # https://tools.ietf.org/html/rfc6713#section-2.1
  34. next_byte = file_handle.read(1)
  35. if next_byte != b' ':
  36. # The zlib compressed data has started and we need to go back
  37. # one byte
  38. file_handle.seek(header_length)
  39. # We use the known length of the data to tell Zlib the size of the
  40. # buffer to allocate.
  41. data = zlib.decompress(file_handle.read(), 15, length)
  42. assert len(data) == length, (
  43. "Incorrect data length while decompressing %s."
  44. "The file could be corrupted." % file_handle)
  45. return data
  46. def write_zfile(file_handle, data, compress=1):
  47. """Write the data in the given file as a Z-file.
  48. Z-files are raw data compressed with zlib used internally by joblib
  49. for persistence. Backward compatibility is not guarantied. Do not
  50. use for external purposes.
  51. """
  52. file_handle.write(_ZFILE_PREFIX)
  53. length = hex_str(len(data))
  54. # Store the length of the data
  55. file_handle.write(asbytes(length.ljust(_MAX_LEN)))
  56. file_handle.write(zlib.compress(asbytes(data), compress))
  57. ###############################################################################
  58. # Utility objects for persistence.
  59. class NDArrayWrapper(object):
  60. """An object to be persisted instead of numpy arrays.
  61. The only thing this object does, is to carry the filename in which
  62. the array has been persisted, and the array subclass.
  63. """
  64. def __init__(self, filename, subclass, allow_mmap=True):
  65. """Constructor. Store the useful information for later."""
  66. self.filename = filename
  67. self.subclass = subclass
  68. self.allow_mmap = allow_mmap
  69. def read(self, unpickler):
  70. """Reconstruct the array."""
  71. filename = os.path.join(unpickler._dirname, self.filename)
  72. # Load the array from the disk
  73. # use getattr instead of self.allow_mmap to ensure backward compat
  74. # with NDArrayWrapper instances pickled with joblib < 0.9.0
  75. allow_mmap = getattr(self, 'allow_mmap', True)
  76. kwargs = {}
  77. if allow_mmap:
  78. kwargs['mmap_mode'] = unpickler.mmap_mode
  79. if "allow_pickle" in inspect.signature(unpickler.np.load).parameters:
  80. # Required in numpy 1.16.3 and later to aknowledge the security
  81. # risk.
  82. kwargs["allow_pickle"] = True
  83. array = unpickler.np.load(filename, **kwargs)
  84. # Reconstruct subclasses. This does not work with old
  85. # versions of numpy
  86. if (hasattr(array, '__array_prepare__') and
  87. self.subclass not in (unpickler.np.ndarray,
  88. unpickler.np.memmap)):
  89. # We need to reconstruct another subclass
  90. new_array = unpickler.np.core.multiarray._reconstruct(
  91. self.subclass, (0,), 'b')
  92. return new_array.__array_prepare__(array)
  93. else:
  94. return array
  95. class ZNDArrayWrapper(NDArrayWrapper):
  96. """An object to be persisted instead of numpy arrays.
  97. This object store the Zfile filename in which
  98. the data array has been persisted, and the meta information to
  99. retrieve it.
  100. The reason that we store the raw buffer data of the array and
  101. the meta information, rather than array representation routine
  102. (tobytes) is that it enables us to use completely the strided
  103. model to avoid memory copies (a and a.T store as fast). In
  104. addition saving the heavy information separately can avoid
  105. creating large temporary buffers when unpickling data with
  106. large arrays.
  107. """
  108. def __init__(self, filename, init_args, state):
  109. """Constructor. Store the useful information for later."""
  110. self.filename = filename
  111. self.state = state
  112. self.init_args = init_args
  113. def read(self, unpickler):
  114. """Reconstruct the array from the meta-information and the z-file."""
  115. # Here we a simply reproducing the unpickling mechanism for numpy
  116. # arrays
  117. filename = os.path.join(unpickler._dirname, self.filename)
  118. array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
  119. with open(filename, 'rb') as f:
  120. data = read_zfile(f)
  121. state = self.state + (data,)
  122. array.__setstate__(state)
  123. return array
class ZipNumpyUnpickler(Unpickler):
    """A subclass of the Unpickler to unpickle our numpy pickles."""

    # Copy the parent dispatch table so that overriding the BUILD opcode
    # below does not mutate the base Unpickler's dispatch in place.
    dispatch = Unpickler.dispatch.copy()

    def __init__(self, filename, file_handle, mmap_mode=None):
        """Constructor.

        Parameters
        ----------
        filename: str
            Path of the pickle; its dirname is kept so companion array
            files can be located next to it.
        file_handle: file object
            Open handle on the z-file containing the pickle stream.
        mmap_mode: str or None, optional
            Memory-map mode forwarded to numpy when loading arrays.
        """
        self._filename = os.path.basename(filename)
        self._dirname = os.path.dirname(filename)
        self.mmap_mode = mmap_mode
        # Decompress the whole z-file into memory before unpickling.
        self.file_handle = self._open_pickle(file_handle)
        Unpickler.__init__(self, self.file_handle)
        # numpy is optional: keep None and fail only if an ndarray is
        # actually encountered during load_build.
        try:
            import numpy as np
        except ImportError:
            np = None
        self.np = np

    def _open_pickle(self, file_handle):
        # Wrap the decompressed payload in a seekable in-memory buffer
        # for the base Unpickler to consume.
        return BytesIO(read_zfile(file_handle))

    def load_build(self):
        """Set the state of a newly created object.

        We capture it to replace our place-holder objects,
        NDArrayWrapper, by the array we are interested in. We
        replace them directly in the stack of pickler.
        """
        Unpickler.load_build(self)
        if isinstance(self.stack[-1], NDArrayWrapper):
            if self.np is None:
                raise ImportError("Trying to unpickle an ndarray, "
                                  "but numpy didn't import correctly")
            # Swap the placeholder on top of the pickle stack for the
            # real array it points to.
            nd_array_wrapper = self.stack.pop()
            array = nd_array_wrapper.read(self)
            self.stack.append(array)

    # Route the BUILD opcode through our override.
    dispatch[pickle.BUILD[0]] = load_build
  156. def load_compatibility(filename):
  157. """Reconstruct a Python object from a file persisted with joblib.dump.
  158. This function ensures the compatibility with joblib old persistence format
  159. (<= 0.9.3).
  160. Parameters
  161. -----------
  162. filename: string
  163. The name of the file from which to load the object
  164. Returns
  165. -------
  166. result: any Python object
  167. The object stored in the file.
  168. See Also
  169. --------
  170. joblib.dump : function to save an object
  171. Notes
  172. -----
  173. This function can load numpy array files saved separately during the
  174. dump.
  175. """
  176. with open(filename, 'rb') as file_handle:
  177. # We are careful to open the file handle early and keep it open to
  178. # avoid race-conditions on renames. That said, if data is stored in
  179. # companion files, moving the directory will create a race when
  180. # joblib tries to access the companion files.
  181. unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
  182. try:
  183. obj = unpickler.load()
  184. except UnicodeDecodeError as exc:
  185. # More user-friendly error message
  186. new_exc = ValueError(
  187. 'You may be trying to read with '
  188. 'python 3 a joblib pickle generated with python 2. '
  189. 'This feature is not supported by joblib.')
  190. new_exc.__cause__ = exc
  191. raise new_exc
  192. finally:
  193. if hasattr(unpickler, 'file_handle'):
  194. unpickler.file_handle.close()
  195. return obj