gaac.py

# Natural Language Toolkit: Group Average Agglomerative Clusterer
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

try:
    import numpy
except ImportError:
    pass

from nltk.cluster.util import VectorSpaceClusterer, Dendrogram, cosine_distance


class GAAClusterer(VectorSpaceClusterer):
    """
    The Group Average Agglomerative Clusterer starts with each of the N
    vectors as singleton clusters. It then iteratively merges pairs of
    clusters which have the closest centroids. This continues until there is
    only one cluster. The order of merges gives rise to a dendrogram: a tree
    with the earlier merges lower than later merges. The membership of a
    given number of clusters c, 1 <= c <= N, can be found by cutting the
    dendrogram at depth c.

    This clusterer uses the cosine similarity metric only, which allows for
    efficient speed-up in the clustering process.
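
    A minimal usage sketch (an illustrative addition; the call is not checked
    as a doctest because the exact cluster assignment depends on the data and
    merge order):

        >>> from nltk.cluster import GAAClusterer
        >>> import numpy
        >>> vectors = [numpy.array(v) for v in [[3, 3], [1, 2], [4, 2], [4, 0]]]
        >>> clusterer = GAAClusterer(2)
        >>> clusterer.cluster(vectors, assign_clusters=True)  # doctest: +SKIP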
  22. """
    def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
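        # num_clusters is the number of groups formed when the dendrogram is
        # cut at the end of clustering; normalise and svd_dimensions are
        # handled by the VectorSpaceClusterer base class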
        VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
        self._num_clusters = num_clusters
        self._dendrogram = None
        self._groups_values = None

    def cluster(self, vectors, assign_clusters=False, trace=False):
        # stores the merge order
        self._dendrogram = Dendrogram(
            [numpy.array(vector, numpy.float64) for vector in vectors]
        )
        return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)

    def cluster_vectorspace(self, vectors, trace=False):
        # variables describing the initial situation
        N = len(vectors)
        cluster_len = [1] * N
        cluster_count = N
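        # index_map[k] tracks which entry in the dendrogram's (shrinking) list
        # of groups the k-th row/column of the distance matrix refers to;
        # merged-away rows are only masked with inf, never physically removed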
        index_map = numpy.arange(N)

        # construct the similarity matrix
        dims = (N, N)
        dist = numpy.ones(dims, dtype=float) * numpy.inf
        for i in range(N):
            for j in range(i + 1, N):
                dist[i, j] = cosine_distance(vectors[i], vectors[j])
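
        # greedily merge the two closest clusters until only the requested
        # number of clusters (at least one) remains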
        while cluster_count > max(self._num_clusters, 1):
            i, j = numpy.unravel_index(dist.argmin(), dims)
            if trace:
                print("merging %d and %d" % (i, j))

            # update similarities for merging i and j
            self._merge_similarities(dist, cluster_len, i, j)

            # remove j
            dist[:, j] = numpy.inf
            dist[j, :] = numpy.inf

            # merge the clusters
            cluster_len[i] = cluster_len[i] + cluster_len[j]
            self._dendrogram.merge(index_map[i], index_map[j])
            cluster_count -= 1

            # update the index map to reflect the indexes as if we
            # had removed j
            index_map[j + 1 :] -= 1
            index_map[j] = N

        self.update_clusters(self._num_clusters)

    def _merge_similarities(self, dist, cluster_len, i, j):
        # the new cluster i merged from i and j adopts the average of
        # i and j's similarity to each other cluster, weighted by the
        # number of points in the clusters i and j
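        # (for example, if cluster i has 2 points at distance 0.2 from some
        # cluster x, and cluster j has 3 points at distance 0.7 from x, the
        # merged cluster's distance to x becomes (2*0.2 + 3*0.7)/5 = 0.5)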
        i_weight = cluster_len[i]
        j_weight = cluster_len[j]
        weight_sum = i_weight + j_weight

        # update for x<i
        dist[:i, i] = dist[:i, i] * i_weight + dist[:i, j] * j_weight
        dist[:i, i] /= weight_sum
        # update for i<x<j
        dist[i, i + 1 : j] = (
            dist[i, i + 1 : j] * i_weight + dist[i + 1 : j, j] * j_weight
        )
        # update for i<j<x
        dist[i, j + 1 :] = dist[i, j + 1 :] * i_weight + dist[j, j + 1 :] * j_weight
        dist[i, i + 1 :] /= weight_sum

    def update_clusters(self, num_clusters):
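        # cut the dendrogram into num_clusters groups and recompute each
        # centroid as the mean of its (optionally normalised) member vectors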
        clusters = self._dendrogram.groups(num_clusters)
        self._centroids = []
        for cluster in clusters:
            assert len(cluster) > 0
            if self._should_normalise:
                centroid = self._normalise(cluster[0])
            else:
                centroid = numpy.array(cluster[0])
            for vector in cluster[1:]:
                if self._should_normalise:
                    centroid += self._normalise(vector)
                else:
                    centroid += vector
            centroid /= len(cluster)
            self._centroids.append(centroid)
        self._num_clusters = len(self._centroids)

    def classify_vectorspace(self, vector):
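        # assign the vector to the cluster whose centroid is nearest under
        # cosine distance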
        best = None
        for i in range(self._num_clusters):
            centroid = self._centroids[i]
            dist = cosine_distance(vector, centroid)
            if not best or dist < best[0]:
                best = (dist, i)
        return best[1]

    def dendrogram(self):
        """
        :return: The dendrogram representing the current clustering
        :rtype:  Dendrogram
        """
        return self._dendrogram

    def num_clusters(self):
        return self._num_clusters

    def __repr__(self):
        return "<GroupAverageAgglomerative Clusterer n=%d>" % self._num_clusters


def demo():
    """
    Non-interactive demonstration of the clusterers with simple 2-D data.
    """
    from nltk.cluster import GAAClusterer

    # use a set of tokens with 2D indices
    vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    # test the GAAC clusterer with 4 clusters
    clusterer = GAAClusterer(4)
    clusters = clusterer.cluster(vectors, True)
    print("Clusterer:", clusterer)
    print("Clustered:", vectors)
    print("As:", clusters)
    print()

    # show the dendrogram
    clusterer.dendrogram().show()

    # classify a new vector
    vector = numpy.array([3, 3])
    print("classify(%s):" % vector, end=" ")
    print(clusterer.classify(vector))
    print()


if __name__ == "__main__":
    demo()