# Natural Language Toolkit: Corpus Reader Utilities
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
#         Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

import os
import bisect
import re
import tempfile
import pickle
from functools import reduce
from xml.etree import ElementTree

from nltk.tokenize import wordpunct_tokenize
from nltk.internals import slice_bounds
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.data import SeekableUnicodeStreamReader
from nltk.util import AbstractLazySequence, LazySubsequence, LazyConcatenation, py25

######################################################################
# { Corpus View
######################################################################


class StreamBackedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file, which acts like a sequence of tokens:
    it can be accessed by index, iterated over, etc.  However, the
    tokens are only constructed as-needed -- the entire corpus is
    never stored in memory at once.

    The constructor to ``StreamBackedCorpusView`` takes two arguments:
    a corpus fileid (specified as a string or as a ``PathPointer``);
    and a block reader.  A "block reader" is a function that reads
    zero or more tokens from a stream, and returns them as a list.  A
    very simple example of a block reader is:

        >>> def simple_block_reader(stream):
        ...     return stream.readline().split()

    This simple block reader reads a single line at a time, and
    returns a single token (consisting of a string) for each
    whitespace-separated substring on the line.

    When deciding how to define the block reader for a given
    corpus, careful consideration should be given to the size of
    blocks handled by the block reader.  Smaller block sizes will
    increase the memory requirements of the corpus view's internal
    data structures (by 2 integers per block).  On the other hand,
    larger block sizes may decrease performance for random access to
    the corpus.  (But note that larger block sizes will *not*
    decrease performance for iteration.)

    Internally, ``CorpusView`` maintains a partial mapping from token
    index to file position, with one entry per block.  When a token
    with a given index *i* is requested, the ``CorpusView`` constructs
    it as follows:

    1. First, it searches the toknum/filepos mapping for the token
       index closest to (but less than or equal to) *i*.

    2. Then, starting at the file position corresponding to that
       index, it reads one block at a time using the block reader
       until it reaches the requested token.

    The toknum/filepos mapping is created lazily: it is initially
    empty, but every time a new block is read, the block's
    initial token is added to the mapping.  (Thus, the toknum/filepos
    map has one entry per block.)

    In order to increase efficiency for random access patterns that
    have high degrees of locality, the corpus view may cache one or
    more blocks.

    :note: Each ``CorpusView`` object internally maintains an open file
        object for its underlying corpus file.  This file should be
        automatically closed when the ``CorpusView`` is garbage collected,
        but if you wish to close it manually, use the ``close()``
        method.  If you access a ``CorpusView``'s items after it has been
        closed, the file object will be automatically re-opened.

    :warning: If the contents of the file are modified during the
        lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior
        is undefined.

    :warning: If a unicode encoding is specified when constructing a
        ``CorpusView``, then the block reader may only call
        ``stream.seek()`` with offsets that have been returned by
        ``stream.tell()``; in particular, calling ``stream.seek()`` with
        relative offsets, or with offsets based on string lengths, may
        lead to incorrect behavior.

    :ivar _block_reader: The function used to read
        a single block from the underlying file stream.
    :ivar _toknum: A list containing the token index of each block
        that has been processed.  In particular, ``_toknum[i]`` is the
        token index of the first token in block ``i``.  Together
        with ``_filepos``, this forms a partial mapping between token
        indices and file positions.
    :ivar _filepos: A list containing the file position of each block
        that has been processed.  In particular, ``_filepos[i]`` is the
        file position of the first character in block ``i``.  Together
        with ``_toknum``, this forms a partial mapping between token
        indices and file positions.
    :ivar _stream: The stream used to access the underlying corpus file.
    :ivar _len: The total number of tokens in the corpus, if known;
        or None, if the number of tokens is not yet known.
    :ivar _eofpos: The character position of the last character in the
        file.  This is calculated when the corpus view is initialized,
        and is used to decide when the end of file has been reached.
    :ivar _cache: A cache of the most recently read block.  It
        is encoded as a tuple (start_toknum, end_toknum, tokens), where
        start_toknum is the token index of the first token in the block;
        end_toknum is the token index of the first token not in the
        block; and tokens is a list of the tokens in the block.
    """

    def __init__(self, fileid, block_reader=None, startpos=0, encoding="utf8"):
        """
        Create a new corpus view, based on the file ``fileid``, and
        read with ``block_reader``.  See the class documentation
        for more information.

        :param fileid: The path to the file that is read by this
            corpus view.  ``fileid`` can either be a string or a
            ``PathPointer``.

        :param startpos: The file position at which the view will
            start reading.  This can be used to skip over preface
            sections.

        :param encoding: The unicode encoding that should be used to
            read the file's contents.  If no encoding is specified,
            then the file's contents will be read as a non-unicode
            string (i.e., a str).
        """
        if block_reader:
            self.read_block = block_reader
        # Initialize our toknum/filepos mapping.
        self._toknum = [0]
        self._filepos = [startpos]
        self._encoding = encoding
        # We don't know our length (number of tokens) yet.
        self._len = None

        self._fileid = fileid
        self._stream = None

        self._current_toknum = None
        """This variable is set to the index of the next token that
           will be read, immediately before ``self.read_block()`` is
           called.  This is provided for the benefit of the block
           reader, which under rare circumstances may need to know
           the current token number."""

        self._current_blocknum = None
        """This variable is set to the index of the next block that
           will be read, immediately before ``self.read_block()`` is
           called.  This is provided for the benefit of the block
           reader, which under rare circumstances may need to know
           the current block number."""

        # Find the length of the file.
        try:
            if isinstance(self._fileid, PathPointer):
                self._eofpos = self._fileid.file_size()
            else:
                self._eofpos = os.stat(self._fileid).st_size
        except Exception as exc:
            raise ValueError("Unable to open or access %r -- %s" % (fileid, exc))

        # Maintain a cache of the most recently read block, to
        # increase efficiency of random access.
        self._cache = (-1, -1, None)

    fileid = property(
        lambda self: self._fileid,
        doc="""
        The fileid of the file that is accessed by this view.

        :type: str or PathPointer""",
    )

    def read_block(self, stream):
        """
        Read a block from the input stream.

        :return: a block of tokens from the input stream
        :rtype: list(any)
        :param stream: an input stream
        :type stream: stream
        """
        raise NotImplementedError("Abstract Method")

    def _open(self):
        """
        Open the file stream associated with this corpus view.  (This
        will be performed automatically if any value is read from the
        view while its file stream is closed.)
        """
        if isinstance(self._fileid, PathPointer):
            self._stream = self._fileid.open(self._encoding)
        elif self._encoding:
            self._stream = SeekableUnicodeStreamReader(
                open(self._fileid, "rb"), self._encoding
            )
        else:
            self._stream = open(self._fileid, "rb")

    def close(self):
        """
        Close the file stream associated with this corpus view.  This
        can be useful if you are worried about running out of file
        handles (although the stream should automatically be closed
        upon garbage collection of the corpus view).  If the corpus
        view is accessed after it is closed, it will be automatically
        re-opened.
        """
        if self._stream is not None:
            self._stream.close()
        self._stream = None

    def __len__(self):
        if self._len is None:
            # iterate_from() sets self._len when it reaches the end
            # of the file:
            for tok in self.iterate_from(self._toknum[-1]):
                pass
        return self._len

    def __getitem__(self, i):
        if isinstance(i, slice):
            start, stop = slice_bounds(self, i)
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= start and stop <= self._cache[1]:
                return self._cache[2][start - offset : stop - offset]
            # Construct & return the result.
            return LazySubsequence(self, start, stop)
        else:
            # Handle negative indices
            if i < 0:
                i += len(self)
            if i < 0:
                raise IndexError("index out of range")
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= i < self._cache[1]:
                return self._cache[2][i - offset]
            # Use iterate_from to extract it.
            try:
                return next(self.iterate_from(i))
            except StopIteration:
                raise IndexError("index out of range")

    # If we wanted to be thread-safe, then this method would need to
    # do some locking.
    def iterate_from(self, start_tok):
        # Start by feeding from the cache, if possible.
        if self._cache[0] <= start_tok < self._cache[1]:
            for tok in self._cache[2][start_tok - self._cache[0] :]:
                yield tok
                start_tok += 1

        # Decide where in the file we should start.  If `start` is in
        # our mapping, then we can jump straight to the correct block;
        # otherwise, start at the last block we've processed.
        if start_tok < self._toknum[-1]:
            block_index = bisect.bisect_right(self._toknum, start_tok) - 1
            toknum = self._toknum[block_index]
            filepos = self._filepos[block_index]
        else:
            block_index = len(self._toknum) - 1
            toknum = self._toknum[-1]
            filepos = self._filepos[-1]

        # Open the stream, if it's not open already.
        if self._stream is None:
            self._open()

        # If the file is empty, the while loop will never run.
        # This *seems* to be all the state we need to set:
        if self._eofpos == 0:
            self._len = 0

        # Each iteration through this loop, we read a single block
        # from the stream.
        while filepos < self._eofpos:
            # Read the next block.
            self._stream.seek(filepos)
            self._current_toknum = toknum
            self._current_blocknum = block_index
            tokens = self.read_block(self._stream)
            assert isinstance(tokens, (tuple, list, AbstractLazySequence)), (
                "block reader %s() should return list or tuple."
                % self.read_block.__name__
            )
            num_toks = len(tokens)
            new_filepos = self._stream.tell()
            assert new_filepos > filepos, (
                "block reader %s() should consume at least 1 byte (filepos=%d)"
                % (self.read_block.__name__, filepos)
            )

            # Update our cache.
            self._cache = (toknum, toknum + num_toks, list(tokens))

            # Update our mapping.
            assert toknum <= self._toknum[-1]
            if num_toks > 0:
                block_index += 1
                if toknum == self._toknum[-1]:
                    assert new_filepos > self._filepos[-1]  # monotonic!
                    self._filepos.append(new_filepos)
                    self._toknum.append(toknum + num_toks)
                else:
                    # Check for consistency:
                    assert (
                        new_filepos == self._filepos[block_index]
                    ), "inconsistent block reader (num chars read)"
                    assert (
                        toknum + num_toks == self._toknum[block_index]
                    ), "inconsistent block reader (num tokens returned)"

            # If we reached the end of the file, then update self._len
            if new_filepos == self._eofpos:
                self._len = toknum + num_toks
            # Generate the tokens in this block (but skip any tokens
            # before start_tok).  Note that between yields, our state
            # may be modified.
            for tok in tokens[max(0, start_tok - toknum) :]:
                yield tok
            # If we're at the end of the file, then we're done.
            assert new_filepos <= self._eofpos
            if new_filepos == self._eofpos:
                break
            # Update our indices
            toknum += num_toks
            filepos = new_filepos

        # If we reach this point, then we should know our length.
        assert self._len is not None
        # Enforce closing of stream once we reached end of file
        # We should have reached EOF once we're out of the while loop.
        self.close()

    # Use concat for these, so we can use a ConcatenatedCorpusView
    # when possible.
    def __add__(self, other):
        return concat([self, other])

    def __radd__(self, other):
        return concat([other, self])

    def __mul__(self, count):
        return concat([self] * count)

    def __rmul__(self, count):
        return concat([self] * count)


class ConcatenatedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file that joins together one or more
    ``StreamBackedCorpusViews<StreamBackedCorpusView>``.  At most
    one file handle is left open at any time.
    """

    def __init__(self, corpus_views):
        self._pieces = corpus_views
        """A list of the corpus subviews that make up this
        concatenation."""

        self._offsets = [0]
        """A list of offsets, indicating the index at which each
        subview begins.  In particular::
            offsets[i] = sum([len(p) for p in pieces[:i]])"""

        self._open_piece = None
        """The most recently accessed corpus subview (or None).
        Before a new subview is accessed, this subview will be closed."""

    def __len__(self):
        if len(self._offsets) <= len(self._pieces):
            # Iterate to the end of the corpus.
            for tok in self.iterate_from(self._offsets[-1]):
                pass
        return self._offsets[-1]

    def close(self):
        for piece in self._pieces:
            piece.close()

    def iterate_from(self, start_tok):
        piecenum = bisect.bisect_right(self._offsets, start_tok) - 1

        while piecenum < len(self._pieces):
            offset = self._offsets[piecenum]
            piece = self._pieces[piecenum]

            # If we've got another piece open, close it first.
            if self._open_piece is not piece:
                if self._open_piece is not None:
                    self._open_piece.close()
                self._open_piece = piece

            # Get everything we can from this piece.
            for tok in piece.iterate_from(max(0, start_tok - offset)):
                yield tok

            # Update the offset table.
            if piecenum + 1 == len(self._offsets):
                self._offsets.append(self._offsets[-1] + len(piece))

            # Move on to the next piece.
            piecenum += 1


def concat(docs):
    """
    Concatenate together the contents of multiple documents from a
    single corpus, using an appropriate concatenation function.  This
    utility function is used by corpus readers when the user requests
    more than one document at a time.
    """
    if len(docs) == 1:
        return docs[0]
    if len(docs) == 0:
        raise ValueError("concat() expects at least one object!")

    types = set(d.__class__ for d in docs)

    # If they're all strings, use string concatenation.
    if all(isinstance(doc, str) for doc in docs):
        return "".join(docs)

    # If they're all corpus views, then use ConcatenatedCorpusView.
    for typ in types:
        if not issubclass(typ, (StreamBackedCorpusView, ConcatenatedCorpusView)):
            break
    else:
        return ConcatenatedCorpusView(docs)

    # If they're all lazy sequences, use a lazy concatenation
    for typ in types:
        if not issubclass(typ, AbstractLazySequence):
            break
    else:
        return LazyConcatenation(docs)

    # Otherwise, see what we can do:
    if len(types) == 1:
        typ = list(types)[0]

        if issubclass(typ, list):
            return reduce((lambda a, b: a + b), docs, [])

        if issubclass(typ, tuple):
            return reduce((lambda a, b: a + b), docs, ())

        if ElementTree.iselement(typ):
            xmltree = ElementTree.Element("documents")
            for doc in docs:
                xmltree.append(doc)
            return xmltree

    # No method found!
    raise ValueError("Don't know how to concatenate types: %r" % types)
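
# Small, illustrative examples of concat() on plain Python sequences (corpus
# views and lazy sequences are dispatched to the view classes above; these
# lines only sketch the fallback behaviour):
#
#     >>> concat([["a", "b"], ["c"]])
#     ['a', 'b', 'c']
#     >>> concat(["ab", "cd"])
#     'abcd'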


######################################################################
# { Corpus View for Pickled Sequences
######################################################################


class PickleCorpusView(StreamBackedCorpusView):
    """
    A stream backed corpus view for corpus files that consist of
    sequences of serialized Python objects (serialized using
    ``pickle.dump``).  One use case for this class is to store the
    result of running feature detection on a corpus to disk.  This can
    be useful when performing feature detection is expensive (so we
    don't want to repeat it); but the corpus is too large to store in
    memory.  The following example illustrates this technique:

        >>> from nltk.corpus.reader.util import PickleCorpusView
        >>> from nltk.util import LazyMap
        >>> feature_corpus = LazyMap(detect_features, corpus)    # doctest: +SKIP
        >>> PickleCorpusView.write(feature_corpus, some_fileid)  # doctest: +SKIP
        >>> pcv = PickleCorpusView(some_fileid)                  # doctest: +SKIP
    """

    BLOCK_SIZE = 100
    PROTOCOL = -1

    def __init__(self, fileid, delete_on_gc=False):
        """
        Create a new corpus view that reads the pickle corpus
        ``fileid``.

        :param delete_on_gc: If true, then ``fileid`` will be deleted
            whenever this object gets garbage-collected.
        """
        self._delete_on_gc = delete_on_gc
        StreamBackedCorpusView.__init__(self, fileid)

    def read_block(self, stream):
        result = []
        for i in range(self.BLOCK_SIZE):
            try:
                result.append(pickle.load(stream))
            except EOFError:
                break
        return result

    def __del__(self):
        """
        If ``delete_on_gc`` was set to true when this
        ``PickleCorpusView`` was created, then delete the corpus view's
        fileid.  (This method is called whenever a
        ``PickleCorpusView`` is garbage-collected.)
        """
        if getattr(self, "_delete_on_gc", False):
            if os.path.exists(self._fileid):
                try:
                    os.remove(self._fileid)
                except (OSError, IOError):
                    pass
        self.__dict__.clear()  # make the garbage collector's job easier

    @classmethod
    def write(cls, sequence, output_file):
        if isinstance(output_file, str):
            output_file = open(output_file, "wb")
        for item in sequence:
            pickle.dump(item, output_file, cls.PROTOCOL)

    @classmethod
    def cache_to_tempfile(cls, sequence, delete_on_gc=True):
        """
        Write the given sequence to a temporary file as a pickle
        corpus; and then return a ``PickleCorpusView`` view for that
        temporary corpus file.

        :param delete_on_gc: If true, then the temporary file will be
            deleted whenever this object gets garbage-collected.
        """
        try:
            fd, output_file_name = tempfile.mkstemp(".pcv", "nltk-")
            output_file = os.fdopen(fd, "wb")
            cls.write(sequence, output_file)
            output_file.close()
            return PickleCorpusView(output_file_name, delete_on_gc)
        except (OSError, IOError) as e:
            raise ValueError("Error while creating temp file: %s" % e)
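
# Illustrative sketch of caching an in-memory sequence to disk and reading
# it back lazily (cache_to_tempfile creates the temporary file itself):
#
#     >>> view = PickleCorpusView.cache_to_tempfile(["tok1", "tok2", "tok3"])  # doctest: +SKIP
#     >>> list(view)                                                           # doctest: +SKIP
#     ['tok1', 'tok2', 'tok3']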


######################################################################
# { Block Readers
######################################################################


def read_whitespace_block(stream):
    toks = []
    for i in range(20):  # Read 20 lines at a time.
        toks.extend(stream.readline().split())
    return toks


def read_wordpunct_block(stream):
    toks = []
    for i in range(20):  # Read 20 lines at a time.
        toks.extend(wordpunct_tokenize(stream.readline()))
    return toks


def read_line_block(stream):
    toks = []
    for i in range(20):
        line = stream.readline()
        if not line:
            return toks
        toks.append(line.rstrip("\n"))
    return toks
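
# Quick illustration of the line-oriented block readers above, using an
# in-memory stream (io.StringIO stands in for a corpus file here):
#
#     >>> from io import StringIO
#     >>> read_whitespace_block(StringIO("one two\nthree\n"))
#     ['one', 'two', 'three']
#     >>> read_line_block(StringIO("one two\nthree\n"))
#     ['one two', 'three']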


def read_blankline_block(stream):
    s = ""
    while True:
        line = stream.readline()
        # End of file:
        if not line:
            if s:
                return [s]
            else:
                return []
        # Blank line:
        elif line and not line.strip():
            if s:
                return [s]
        # Other line:
        else:
            s += line
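
# For example (a sketch on an in-memory stream): each call returns the next
# blank-line-delimited paragraph.
#
#     >>> from io import StringIO
#     >>> read_blankline_block(StringIO("Para one.\nStill one.\n\nPara two.\n"))
#     ['Para one.\nStill one.\n']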


def read_alignedsent_block(stream):
    s = ""
    while True:
        line = stream.readline()
        # End of file (check this before indexing into the line):
        if not line:
            if s:
                return [s]
            else:
                return []
        # Separator or blank line:
        if line[0] == "=" or line[0] == "\n" or line[:2] == "\r\n":
            continue
        # Other line:
        else:
            s += line
            if re.match(r"^\d+-\d+", line) is not None:
                return [s]


def read_regexp_block(stream, start_re, end_re=None):
    """
    Read a sequence of tokens from a stream, where tokens begin with
    lines that match ``start_re``.  If ``end_re`` is specified, then
    tokens end with lines that match ``end_re``; otherwise, tokens end
    whenever the next line matching ``start_re`` or EOF is found.
    """
    # Scan until we find a line matching the start regexp.
    while True:
        line = stream.readline()
        if not line:
            return []  # end of file.
        if re.match(start_re, line):
            break

    # Scan until we find another line matching the regexp, or EOF.
    lines = [line]
    while True:
        oldpos = stream.tell()
        line = stream.readline()
        # End of file:
        if not line:
            return ["".join(lines)]
        # End of token:
        if end_re is not None and re.match(end_re, line):
            return ["".join(lines)]
        # Start of new token: backup to just before it starts, and
        # return the token we've already collected.
        if end_re is None and re.match(start_re, line):
            stream.seek(oldpos)
            return ["".join(lines)]
        # Anything else is part of the token.
        lines.append(line)
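
# A short, illustrative call (tokens start at lines matching ``start_re``;
# the stream is rewound to just before the next token):
#
#     >>> from io import StringIO
#     >>> read_regexp_block(StringIO("id=1\na b\nid=2\nc d\n"), start_re=r"id=\d+")
#     ['id=1\na b\n']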


def read_sexpr_block(stream, block_size=16384, comment_char=None):
    """
    Read a sequence of s-expressions from the stream, and leave the
    stream's file position at the end of the last complete s-expression
    read.  This function will always return at least one s-expression,
    unless there are no more s-expressions in the file.

    If the file ends in the middle of an s-expression, then that
    incomplete s-expression is returned when the end of the file is
    reached.

    :param block_size: The default block size for reading.  If an
        s-expression is longer than one block, then more than one
        block will be read.
    :param comment_char: A character that marks comments.  Any lines
        that begin with this character will be stripped out.
        (If spaces or tabs precede the comment character, then the
        line will not be stripped.)
    """
    start = stream.tell()
    block = stream.read(block_size)
    encoding = getattr(stream, "encoding", None)
    assert encoding is not None or isinstance(block, str)
    if encoding not in (None, "utf-8"):
        import warnings

        warnings.warn(
            "Parsing may fail, depending on the properties "
            "of the %s encoding!" % encoding
        )
        # (e.g., the utf-16 encoding does not work because it insists
        # on adding BOMs to the beginning of encoded strings.)

    if comment_char:
        COMMENT = re.compile("(?m)^%s.*$" % re.escape(comment_char))
    while True:
        try:
            # If we're stripping comments, then make sure our block ends
            # on a line boundary; and then replace any comments with
            # space characters.  (We can't just strip them out -- that
            # would make our offset wrong.)
            if comment_char:
                block += stream.readline()
                block = re.sub(COMMENT, _sub_space, block)
            # Read the block.
            tokens, offset = _parse_sexpr_block(block)
            # Skip whitespace
            offset = re.compile(r"\s*").search(block, offset).end()

            # Move to the end position.
            if encoding is None:
                stream.seek(start + offset)
            else:
                stream.seek(start + len(block[:offset].encode(encoding)))

            # Return the list of tokens we processed
            return tokens
        except ValueError as e:
            if e.args[0] == "Block too small":
                next_block = stream.read(block_size)
                if next_block:
                    block += next_block
                    continue
                else:
                    # The file ended mid-sexpr -- return what we got.
                    return [block.strip()]
            else:
                raise
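
# Minimal illustration on an in-memory stream; each call returns the complete
# s-expressions it finds and leaves the stream position just after them:
#
#     >>> from io import StringIO
#     >>> read_sexpr_block(StringIO("(a b) (c (d e))\n"))
#     ['(a b)', '(c (d e))']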


def _sub_space(m):
    """Helper function: given a regexp match, return a string of
    spaces that's the same length as the matched string."""
    return " " * (m.end() - m.start())


def _parse_sexpr_block(block):
    tokens = []
    start = end = 0

    while end < len(block):
        m = re.compile(r"\S").search(block, end)
        if not m:
            return tokens, end

        start = m.start()

        # Case 1: sexpr is not parenthesized.
        if m.group() != "(":
            m2 = re.compile(r"[\s(]").search(block, start)
            if m2:
                end = m2.start()
            else:
                if tokens:
                    return tokens, end
                raise ValueError("Block too small")

        # Case 2: parenthesized sexpr.
        else:
            nesting = 0
            for m in re.compile(r"[()]").finditer(block, start):
                if m.group() == "(":
                    nesting += 1
                else:
                    nesting -= 1
                if nesting == 0:
                    end = m.end()
                    break
            else:
                if tokens:
                    return tokens, end
                raise ValueError("Block too small")

        tokens.append(block[start:end])

    return tokens, end


######################################################################
# { Finding Corpus Items
######################################################################


def find_corpus_fileids(root, regexp):
    if not isinstance(root, PathPointer):
        raise TypeError("find_corpus_fileids: expected a PathPointer")
    regexp += "$"

    # Find fileids in a zipfile: scan the zipfile's namelist.  Filter
    # out entries that end in '/' -- they're directories.
    if isinstance(root, ZipFilePathPointer):
        fileids = [
            name[len(root.entry) :]
            for name in root.zipfile.namelist()
            if not name.endswith("/")
        ]
        items = [name for name in fileids if re.match(regexp, name)]
        return sorted(items)

    # Find fileids in a directory: use os.walk to search all (proper
    # or symlinked) subdirectories, and match paths against the regexp.
    elif isinstance(root, FileSystemPathPointer):
        items = []
        # workaround for py25 which doesn't support followlinks
        kwargs = {}
        if not py25():
            kwargs = {"followlinks": True}
        for dirname, subdirs, fileids in os.walk(root.path, **kwargs):
            prefix = "".join("%s/" % p for p in _path_from(root.path, dirname))
            items += [
                prefix + fileid
                for fileid in fileids
                if re.match(regexp, prefix + fileid)
            ]
            # Don't visit svn directories:
            if ".svn" in subdirs:
                subdirs.remove(".svn")
        return sorted(items)

    else:
        raise AssertionError("Don't know how to handle %r" % root)


def _path_from(parent, child):
    if os.path.split(parent)[1] == "":
        parent = os.path.split(parent)[0]
    path = []
    while parent != child:
        child, dirname = os.path.split(child)
        path.insert(0, dirname)
        assert os.path.split(child)[0] != child
    return path
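
# Example call (illustrative sketch; '/path/to/corpus' is a placeholder
# directory):
#
#     >>> from nltk.data import FileSystemPathPointer
#     >>> find_corpus_fileids(FileSystemPathPointer('/path/to/corpus'),
#     ...                     r'.*\.txt')                         # doctest: +SKIP
#
# The helper _path_from() splits a descendant path into its components
# relative to an ancestor directory, e.g. on a POSIX filesystem:
#
#     >>> _path_from('/corpora', '/corpora/brown/ca01')
#     ['brown', 'ca01']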


######################################################################
# { Paragraph structure in Treebank files
######################################################################


def tagged_treebank_para_block_reader(stream):
    # Read the next paragraph.
    para = ""
    while True:
        line = stream.readline()
        # End of paragraph:
        if re.match(r"======+\s*$", line):
            if para.strip():
                return [para]
        # End of file:
        elif line == "":
            if para.strip():
                return [para]
            else:
                return []
        # Content line:
        else:
            para += line
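
# Sketch of the paragraph blocking on an in-memory stream: lines made up of
# '=' characters delimit paragraphs, as in the tagged Treebank files.
#
#     >>> from io import StringIO
#     >>> s = StringIO("======================\nThe/DT dog/NN barked/VBD ./.\n\n======================\n")
#     >>> tagged_treebank_para_block_reader(s)
#     ['The/DT dog/NN barked/VBD ./.\n\n']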