# Natural Language Toolkit: NLTK's very own tokenizer.
#
# Copyright (C) 2001-2020 NLTK Project
# Author:
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT

import re

from nltk.tokenize.api import TokenizerI

class MacIntyreContractions:
    """
    List of contractions adapted from Robert MacIntyre's tokenizer.
    """

    CONTRACTIONS2 = [
        r"(?i)\b(can)(?#X)(not)\b",
        r"(?i)\b(d)(?#X)('ye)\b",
        r"(?i)\b(gim)(?#X)(me)\b",
        r"(?i)\b(gon)(?#X)(na)\b",
        r"(?i)\b(got)(?#X)(ta)\b",
        r"(?i)\b(lem)(?#X)(me)\b",
        r"(?i)\b(mor)(?#X)('n)\b",
        r"(?i)\b(wan)(?#X)(na)\s",
    ]
    CONTRACTIONS3 = [r"(?i) ('t)(?#X)(is)\b", r"(?i) ('t)(?#X)(was)\b"]
    CONTRACTIONS4 = [r"(?i)\b(whad)(dd)(ya)\b", r"(?i)\b(wha)(t)(cha)\b"]
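
# As an illustration (not part of the original module), substituting one of
# these patterns with r" \1 \2 " splits the captured contraction in two:
#
#   re.sub(MacIntyreContractions().CONTRACTIONS2[0], r" \1 \2 ", "I cannot go")
#   # -> 'I  can not  go'   (the extra spaces are collapsed by .split() later)
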
class NLTKWordTokenizer(TokenizerI):
    """
    The NLTK tokenizer that improves upon the TreebankWordTokenizer.

    The tokenizer is "destructive" in that the regexes it applies can
    munge the input string beyond reconstruction. It is possible to apply
    `TreebankWordDetokenizer.detokenize` to the output of
    `NLTKWordTokenizer.tokenize`, but there is no guarantee that the
    original string will be recovered.
    """

    # Starting quotes.
    STARTING_QUOTES = [
        (re.compile(u"([«“‘„]|[`]+)", re.U), r" \1 "),
        (re.compile(r"^\""), r"``"),
        (re.compile(r"(``)"), r" \1 "),
        (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "),
        (re.compile(r"(?i)(\')(?!re|ve|ll|m|t|s|d)(\w)\b", re.U), r"\1 \2"),
    ]
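    # For example (an illustration): the leading straight quote in
    # '"Hello" he said' is rewritten to a padded PTB-style opening quote,
    # giving ' `` Hello" he said'.
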
    # Ending quotes.
    ENDING_QUOTES = [
        (re.compile(u"([»”’])", re.U), r" \1 "),
        (re.compile(r'"'), " '' "),
        (re.compile(r"(\S)(\'\')"), r"\1 \2 "),
        (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "),
        (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "),
    ]
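    # For example (an illustration): the remaining straight quote then
    # becomes a padded closing quote, and clitics are split off, e.g.
    # "the dog's bone" -> "the dog 's bone".
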
    # For improvements over the starting/closing quote handling in
    # TreebankWordTokenizer, see the discussion at
    # https://github.com/nltk/nltk/pull/1437
    # In addition to TreebankWordTokenizer's rules, nltk.word_tokenize now splits on
    # - chevron quotes u'\xab' and u'\xbb'
    # - unicode quotes u'\u2018', u'\u2019', u'\u201c' and u'\u201d'
    # See https://github.com/nltk/nltk/issues/1995#issuecomment-376741608
    # Also, the behavior of splitting on clitics now follows Stanford CoreNLP:
    # clitics are covered by (?!re|ve|ll|m|t|s|d)(\w)\b

    # Punctuation.
    PUNCTUATION = [
        (re.compile(r'([^\.])(\.)([\]\)}>"\'' u"»”’ " r"]*)\s*$", re.U), r"\1 \2 \3 "),
        (re.compile(r"([:,])([^\d])"), r" \1 \2"),
        (re.compile(r"([:,])$"), r" \1 "),
        (re.compile(r"\.{2,}", re.U), r" \g<0> "),  # See https://github.com/nltk/nltk/pull/2322
        (re.compile(r"[;@#$%&]"), r" \g<0> "),
        (
            re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'),
            r"\1 \2\3 ",
        ),  # Handles the final period.
        (re.compile(r"[?!]"), r" \g<0> "),
        (re.compile(r"([^'])' "), r"\1 ' "),
        (re.compile(r"[*]", re.U), r" \g<0> "),  # See https://github.com/nltk/nltk/pull/2322
    ]
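    # For example (an illustration): after these substitutions and the final
    # whitespace split, "Hello, world." tokenizes to ['Hello', ',', 'world', '.'].
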
    # Pads parentheses, brackets, braces, and angle brackets with spaces.
    PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ")
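    # For example (an illustration): "(see)" -> " ( see ) ".
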
    # Optionally convert parentheses and brackets to Penn Treebank (PTB) symbols.
    CONVERT_PARENTHESES = [
        (re.compile(r"\("), "-LRB-"),
        (re.compile(r"\)"), "-RRB-"),
        (re.compile(r"\["), "-LSB-"),
        (re.compile(r"\]"), "-RSB-"),
        (re.compile(r"\{"), "-LCB-"),
        (re.compile(r"\}"), "-RCB-"),
    ]
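    # For example (an illustration): with convert_parentheses=True,
    # "( a )" tokenizes to ['-LRB-', 'a', '-RRB-'].
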
    DOUBLE_DASHES = (re.compile(r"--"), r" -- ")

    # List of contractions adapted from Robert MacIntyre's tokenizer.
    _contractions = MacIntyreContractions()
    CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2))
    CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3))
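    # Note (editorial): CONTRACTIONS4 is intentionally left uncompiled;
    # tokenize() below never applies it, mirroring the rules that are
    # commented out in the original sed script.
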
    def tokenize(self, text, convert_parentheses=False, return_str=False):
        for regexp, substitution in self.STARTING_QUOTES:
            text = regexp.sub(substitution, text)

        for regexp, substitution in self.PUNCTUATION:
            text = regexp.sub(substitution, text)

        # Handles parentheses.
        regexp, substitution = self.PARENS_BRACKETS
        text = regexp.sub(substitution, text)

        # Optionally convert parentheses to PTB symbols.
        if convert_parentheses:
            for regexp, substitution in self.CONVERT_PARENTHESES:
                text = regexp.sub(substitution, text)

        # Handles double dash.
        regexp, substitution = self.DOUBLE_DASHES
        text = regexp.sub(substitution, text)

        # Add extra spaces so the ending-quote and contraction regexes
        # can anchor on surrounding whitespace.
        text = " " + text + " "

        for regexp, substitution in self.ENDING_QUOTES:
            text = regexp.sub(substitution, text)

        for regexp in self.CONTRACTIONS2:
            text = regexp.sub(r" \1 \2 ", text)
        for regexp in self.CONTRACTIONS3:
            text = regexp.sub(r" \1 \2 ", text)

        # We are not using CONTRACTIONS4 since
        # they are also commented out in the SED scripts.
        # for regexp in self._contractions.CONTRACTIONS4:
        #     text = regexp.sub(r' \1 \2 \3 ', text)

        return text if return_str else text.split()
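

# A minimal usage sketch (an illustration, not part of the original module).
if __name__ == "__main__":
    tokenizer = NLTKWordTokenizer()
    # Quotes become `` and '', the final period splits off, and the
    # MacIntyre contraction rules split "cannot" into "can not".
    print(tokenizer.tokenize('"I cannot do it," she said.'))
    # Expected (illustrative):
    # ['``', 'I', 'can', 'not', 'do', 'it', ',', "''", 'she', 'said', '.']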