# rte_classify.py
  1. # Natural Language Toolkit: RTE Classifier
  2. #
  3. # Copyright (C) 2001-2020 NLTK Project
  4. # Author: Ewan Klein <ewan@inf.ed.ac.uk>
  5. # URL: <http://nltk.org/>
  6. # For license information, see LICENSE.TXT
  7. """
  8. Simple classifier for RTE corpus.
  9. It calculates the overlap in words and named entities between text and
  10. hypothesis, and also whether there are words / named entities in the
  11. hypothesis which fail to occur in the text, since this is an indicator that
  12. the hypothesis is more informative than (i.e not entailed by) the text.
  13. TO DO: better Named Entity classification
  14. TO DO: add lemmatization
  15. """
  16. from nltk.tokenize import RegexpTokenizer
  17. from nltk.classify.util import accuracy, check_megam_config
  18. from nltk.classify.maxent import MaxentClassifier
  19. class RTEFeatureExtractor(object):
  20. """
  21. This builds a bag of words for both the text and the hypothesis after
  22. throwing away some stopwords, then calculates overlap and difference.
  23. """
  24. def __init__(self, rtepair, stop=True, use_lemmatize=False):
  25. """
  26. :param rtepair: a ``RTEPair`` from which features should be extracted
  27. :param stop: if ``True``, stopwords are thrown away.
  28. :type stop: bool
  29. """
  30. self.stop = stop
  31. self.stopwords = set(
  32. [
  33. "a",
  34. "the",
  35. "it",
  36. "they",
  37. "of",
  38. "in",
  39. "to",
  40. "is",
  41. "have",
  42. "are",
  43. "were",
  44. "and",
  45. "very",
  46. ".",
  47. ",",
  48. ]
  49. )
  50. self.negwords = set(["no", "not", "never", "failed", "rejected", "denied"])
  51. # Try to tokenize so that abbreviations, monetary amounts, email
  52. # addresses, URLs are single tokens.
  53. tokenizer = RegexpTokenizer("[\w.@:/]+|\w+|\$[\d.]+")
  54. # Get the set of word types for text and hypothesis
  55. self.text_tokens = tokenizer.tokenize(rtepair.text)
  56. self.hyp_tokens = tokenizer.tokenize(rtepair.hyp)
  57. self.text_words = set(self.text_tokens)
  58. self.hyp_words = set(self.hyp_tokens)
  59. if use_lemmatize:
  60. self.text_words = set(self._lemmatize(token) for token in self.text_tokens)
  61. self.hyp_words = set(self._lemmatize(token) for token in self.hyp_tokens)
  62. if self.stop:
  63. self.text_words = self.text_words - self.stopwords
  64. self.hyp_words = self.hyp_words - self.stopwords
  65. self._overlap = self.hyp_words & self.text_words
  66. self._hyp_extra = self.hyp_words - self.text_words
  67. self._txt_extra = self.text_words - self.hyp_words
  68. def overlap(self, toktype, debug=False):
  69. """
  70. Compute the overlap between text and hypothesis.
  71. :param toktype: distinguish Named Entities from ordinary words
  72. :type toktype: 'ne' or 'word'
  73. """
  74. ne_overlap = set(token for token in self._overlap if self._ne(token))
  75. if toktype == "ne":
  76. if debug:
  77. print("ne overlap", ne_overlap)
  78. return ne_overlap
  79. elif toktype == "word":
  80. if debug:
  81. print("word overlap", self._overlap - ne_overlap)
  82. return self._overlap - ne_overlap
  83. else:
  84. raise ValueError("Type not recognized:'%s'" % toktype)
  85. def hyp_extra(self, toktype, debug=True):
  86. """
  87. Compute the extraneous material in the hypothesis.
  88. :param toktype: distinguish Named Entities from ordinary words
  89. :type toktype: 'ne' or 'word'
  90. """
  91. ne_extra = set(token for token in self._hyp_extra if self._ne(token))
  92. if toktype == "ne":
  93. return ne_extra
  94. elif toktype == "word":
  95. return self._hyp_extra - ne_extra
  96. else:
  97. raise ValueError("Type not recognized: '%s'" % toktype)
  98. @staticmethod
  99. def _ne(token):
  100. """
  101. This just assumes that words in all caps or titles are
  102. named entities.
  103. :type token: str
  104. """
  105. if token.istitle() or token.isupper():
  106. return True
  107. return False
  108. @staticmethod
  109. def _lemmatize(word):
  110. """
  111. Use morphy from WordNet to find the base form of verbs.
  112. """
  113. lemma = nltk.corpus.wordnet.morphy(word, pos=nltk.corpus.wordnet.VERB)
  114. if lemma is not None:
  115. return lemma
  116. return word
  117. def rte_features(rtepair):
  118. extractor = RTEFeatureExtractor(rtepair)
  119. features = {}
  120. features["alwayson"] = True
  121. features["word_overlap"] = len(extractor.overlap("word"))
  122. features["word_hyp_extra"] = len(extractor.hyp_extra("word"))
  123. features["ne_overlap"] = len(extractor.overlap("ne"))
  124. features["ne_hyp_extra"] = len(extractor.hyp_extra("ne"))
  125. features["neg_txt"] = len(extractor.negwords & extractor.text_words)
  126. features["neg_hyp"] = len(extractor.negwords & extractor.hyp_words)
  127. return features
  128. def rte_featurize(rte_pairs):
  129. return [(rte_features(pair), pair.value) for pair in rte_pairs]
  130. def rte_classifier(algorithm):
  131. from nltk.corpus import rte as rte_corpus
  132. train_set = rte_corpus.pairs(["rte1_dev.xml", "rte2_dev.xml", "rte3_dev.xml"])
  133. test_set = rte_corpus.pairs(["rte1_test.xml", "rte2_test.xml", "rte3_test.xml"])
  134. featurized_train_set = rte_featurize(train_set)
  135. featurized_test_set = rte_featurize(test_set)
  136. # Train the classifier
  137. print("Training classifier...")
  138. if algorithm in ["megam", "BFGS"]: # MEGAM based algorithms.
  139. # Ensure that MEGAM is configured first.
  140. check_megam_config()
  141. clf = lambda x: MaxentClassifier.train(featurized_train_set, algorithm)
  142. elif algorithm in ["GIS", "IIS"]: # Use default GIS/IIS MaxEnt algorithm
  143. clf = MaxentClassifier.train(featurized_train_set, algorithm)
  144. else:
  145. err_msg = str(
  146. "RTEClassifier only supports these algorithms:\n "
  147. "'megam', 'BFGS', 'GIS', 'IIS'.\n"
  148. )
  149. raise Exception(err_msg)
  150. print("Testing classifier...")
  151. acc = accuracy(clf, featurized_test_set)
  152. print("Accuracy: %6.4f" % acc)
  153. return clf