probability.py 89 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574
  1. # -*- coding: utf-8 -*-
  2. # Natural Language Toolkit: Probability and Statistics
  3. #
  4. # Copyright (C) 2001-2020 NLTK Project
  5. # Author: Edward Loper <edloper@gmail.com>
  6. # Steven Bird <stevenbird1@gmail.com> (additions)
  7. # Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
  8. # Peter Ljunglöf <peter.ljunglof@heatherleaf.se> (additions)
  9. # Liang Dong <ldong@clemson.edu> (additions)
  10. # Geoffrey Sampson <sampson@cantab.net> (additions)
  11. # Ilia Kurenkov <ilia.kurenkov@gmail.com> (additions)
  12. #
  13. # URL: <http://nltk.org/>
  14. # For license information, see LICENSE.TXT
  15. """
  16. Classes for representing and processing probabilistic information.
  17. The ``FreqDist`` class is used to encode "frequency distributions",
  18. which count the number of times that each outcome of an experiment
  19. occurs.
  20. The ``ProbDistI`` class defines a standard interface for "probability
  21. distributions", which encode the probability of each outcome for an
  22. experiment. There are two types of probability distribution:
  23. - "derived probability distributions" are created from frequency
  24. distributions. They attempt to model the probability distribution
  25. that generated the frequency distribution.
  26. - "analytic probability distributions" are created directly from
  27. parameters (such as variance).
  28. The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
  29. are used to encode conditional distributions. Conditional probability
  30. distributions can be derived or analytic; but currently the only
  31. implementation of the ``ConditionalProbDistI`` interface is
  32. ``ConditionalProbDist``, a derived distribution.
  33. """
  34. import math
  35. import random
  36. import warnings
  37. import array
  38. from collections import defaultdict, Counter
  39. from functools import reduce
  40. from abc import ABCMeta, abstractmethod
  41. from nltk.internals import raise_unorderable_types
  42. _NINF = float("-1e300")
  43. ##//////////////////////////////////////////////////////
  44. ## Frequency Distributions
  45. ##//////////////////////////////////////////////////////
class FreqDist(Counter):
    """
    A frequency distribution for the outcomes of an experiment.  A
    frequency distribution records the number of times each outcome of
    an experiment has occurred.  For example, a frequency distribution
    could be used to record the frequency of each word type in a
    document.  Formally, a frequency distribution can be defined as a
    function mapping from each sample to the number of times that
    sample occurred as an outcome.

    Frequency distributions are generally constructed by running a
    number of experiments, and incrementing the count for a sample
    every time it is an outcome of an experiment.  For example, the
    following code will produce a frequency distribution that encodes
    how often each word occurs in a text:

        >>> from nltk.tokenize import word_tokenize
        >>> from nltk.probability import FreqDist
        >>> sent = 'This is an example sentence'
        >>> fdist = FreqDist()
        >>> for word in word_tokenize(sent):
        ...     fdist[word.lower()] += 1

    An equivalent way to do this is with the initializer:

        >>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
    """

    def __init__(self, samples=None):
        """
        Construct a new frequency distribution.  If ``samples`` is
        given, then the frequency distribution will be initialized
        with the count of each object in ``samples``; otherwise, it
        will be initialized to be empty.

        In particular, ``FreqDist()`` returns an empty frequency
        distribution; and ``FreqDist(samples)`` first creates an empty
        frequency distribution, and then calls ``update`` with the
        list ``samples``.

        :param samples: The samples to initialize the frequency
            distribution with.
        :type samples: Sequence
        """
        Counter.__init__(self, samples)

        # Cached total number of outcomes (sum of all counts).  Computed
        # lazily by N() and invalidated (set to None) by every mutating
        # override below, so repeated N() calls stay O(1).
        self._N = None

    def N(self):
        """
        Return the total number of sample outcomes that have been
        recorded by this FreqDist.  For the number of unique
        sample values (or bins) with counts greater than zero, use
        ``FreqDist.B()``.

        :rtype: int
        """
        if self._N is None:
            # Not already cached, or cache has been invalidated
            self._N = sum(self.values())
        return self._N

    def __setitem__(self, key, val):
        """
        Override ``Counter.__setitem__()`` to invalidate the cached N
        """
        self._N = None
        super(FreqDist, self).__setitem__(key, val)

    def __delitem__(self, key):
        """
        Override ``Counter.__delitem__()`` to invalidate the cached N
        """
        self._N = None
        super(FreqDist, self).__delitem__(key)

    def update(self, *args, **kwargs):
        """
        Override ``Counter.update()`` to invalidate the cached N
        """
        self._N = None
        super(FreqDist, self).update(*args, **kwargs)

    def setdefault(self, key, val):
        """
        Override ``Counter.setdefault()`` to invalidate the cached N
        """
        # NOTE(review): invalidates unconditionally, even when the key is
        # already present and nothing changes; N() simply recomputes.
        self._N = None
        super(FreqDist, self).setdefault(key, val)

    def B(self):
        """
        Return the total number of sample values (or "bins") that
        have counts greater than zero.  For the total
        number of sample outcomes recorded, use ``FreqDist.N()``.
        (FreqDist.B() is the same as len(FreqDist).)

        :rtype: int
        """
        return len(self)

    def hapaxes(self):
        """
        Return a list of all samples that occur once (hapax legomena)

        :rtype: list
        """
        return [item for item in self if self[item] == 1]

    def Nr(self, r, bins=None):
        """
        Return the number of samples with count ``r``.

        :param r: A sample count.
        :type r: int
        :param bins: The number of possible sample outcomes; used only
            to compute Nr(0) (see ``r_Nr``).
        :type bins: int
        :rtype: int
        """
        return self.r_Nr(bins)[r]

    def r_Nr(self, bins=None):
        """
        Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0.

        :type bins: int
        :param bins: The number of possible sample outcomes. ``bins``
            is used to calculate Nr(0).  In particular, Nr(0) is
            ``bins-self.B()``.  If ``bins`` is not specified, it
            defaults to ``self.B()`` (so Nr(0) will be 0).
        :rtype: dict
        """
        _r_Nr = defaultdict(int)
        for count in self.values():
            _r_Nr[count] += 1

        # Special case for Nr[0]:
        _r_Nr[0] = bins - self.B() if bins is not None else 0

        return _r_Nr

    def _cumulative_frequencies(self, samples):
        """
        Return the cumulative frequencies of the specified samples.
        If no samples are specified, all counts are returned, starting
        with the largest.

        :param samples: the samples whose frequencies should be returned.
        :type samples: any
        :rtype: list(float)
        """
        cf = 0.0
        for sample in samples:
            cf += self[sample]
            yield cf

    # slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
    # here, freq() does probs
    def freq(self, sample):
        """
        Return the frequency of a given sample.  The frequency of a
        sample is defined as the count of that sample divided by the
        total number of sample outcomes that have been recorded by
        this FreqDist.  The count of a sample is defined as the
        number of times that sample outcome was recorded by this
        FreqDist.  Frequencies are always real numbers in the range
        [0, 1].  An empty distribution yields frequency 0 for every
        sample (rather than dividing by zero).

        :param sample: the sample whose frequency
            should be returned.
        :type sample: any
        :rtype: float
        """
        n = self.N()
        if n == 0:
            return 0
        return self[sample] / n

    def max(self):
        """
        Return the sample with the greatest number of outcomes in this
        frequency distribution.  If two or more samples have the same
        number of outcomes, return one of them; which sample is
        returned is undefined.

        :return: The sample with the maximum number of outcomes in this
            frequency distribution.
        :rtype: any
        :raise ValueError: If this frequency distribution is empty.
        """
        if len(self) == 0:
            raise ValueError(
                "A FreqDist must have at least one sample before max is defined."
            )
        return self.most_common(1)[0][0]

    def plot(self, *args, **kwargs):
        """
        Plot samples from the frequency distribution
        displaying the most frequent sample first.  If an integer
        parameter is supplied, stop after this many samples have been
        plotted.  For a cumulative plot, specify cumulative=True.
        (Requires Matplotlib to be installed.)

        :param title: The title for the graph
        :type title: str
        :param cumulative: A flag to specify whether the plot is cumulative (default = False)
        :type cumulative: bool
        :param percents: If true (and cumulative), plot cumulative percents
            instead of cumulative counts (default = False)
        :type percents: bool
        :return: The matplotlib axes the plot was drawn on.
        """
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            # NOTE(review): a missing dependency is surfaced as ValueError
            # here; ImportError would be more conventional.
            raise ValueError(
                "The plot function requires matplotlib to be installed."
                "See http://matplotlib.org/"
            )

        # With no positional args, plot every sample.
        if len(args) == 0:
            args = [len(self)]
        samples = [item for item, _ in self.most_common(*args)]

        cumulative = _get_kwarg(kwargs, "cumulative", False)
        percents = _get_kwarg(kwargs, "percents", False)
        if cumulative:
            freqs = list(self._cumulative_frequencies(samples))
            ylabel = "Cumulative Counts"
            if percents:
                # Scale by the final (total) cumulative count.
                freqs = [f / freqs[len(freqs) - 1] * 100 for f in freqs]
                ylabel = "Cumulative Percents"
        else:
            freqs = [self[sample] for sample in samples]
            ylabel = "Counts"
        # percents = [f * 100 for f in freqs]  only in ProbDist?

        ax = plt.gca()
        ax.grid(True, color="silver")

        if "linewidth" not in kwargs:
            kwargs["linewidth"] = 2
        if "title" in kwargs:
            ax.set_title(kwargs["title"])
            del kwargs["title"]

        ax.plot(freqs, **kwargs)
        ax.set_xticks(range(len(samples)))
        ax.set_xticklabels([str(s) for s in samples], rotation=90)
        ax.set_xlabel("Samples")
        ax.set_ylabel(ylabel)

        plt.show()

        return ax

    def tabulate(self, *args, **kwargs):
        """
        Tabulate the given samples from the frequency distribution (cumulative),
        displaying the most frequent sample first.  If an integer
        parameter is supplied, stop after this many samples have been
        plotted.

        :param samples: The samples to plot (default is all samples)
        :type samples: list
        :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
        :type cumulative: bool
        """
        if len(args) == 0:
            args = [len(self)]
        samples = [item for item, _ in self.most_common(*args)]

        cumulative = _get_kwarg(kwargs, "cumulative", False)
        if cumulative:
            freqs = list(self._cumulative_frequencies(samples))
        else:
            freqs = [self[sample] for sample in samples]
        # percents = [f * 100 for f in freqs]  only in ProbDist?

        # Column width: wide enough for the longest label or count.
        width = max(len("{}".format(s)) for s in samples)
        width = max(width, max(len("%d" % f) for f in freqs))

        for i in range(len(samples)):
            print("%*s" % (width, samples[i]), end=" ")
        print()
        for i in range(len(samples)):
            print("%*d" % (width, freqs[i]), end=" ")
        print()

    def copy(self):
        """
        Create a copy of this frequency distribution.

        :rtype: FreqDist
        """
        return self.__class__(self)

    # Mathematical operatiors

    def __add__(self, other):
        """
        Add counts from two counters.

        >>> FreqDist('abbb') + FreqDist('bcc')
        FreqDist({'b': 4, 'c': 2, 'a': 1})

        """
        return self.__class__(super(FreqDist, self).__add__(other))

    def __sub__(self, other):
        """
        Subtract count, but keep only results with positive counts.

        >>> FreqDist('abbbc') - FreqDist('bccd')
        FreqDist({'b': 2, 'a': 1})

        """
        return self.__class__(super(FreqDist, self).__sub__(other))

    def __or__(self, other):
        """
        Union is the maximum of value in either of the input counters.

        >>> FreqDist('abbb') | FreqDist('bcc')
        FreqDist({'b': 3, 'c': 2, 'a': 1})

        """
        return self.__class__(super(FreqDist, self).__or__(other))

    def __and__(self, other):
        """
        Intersection is the minimum of corresponding counts.

        >>> FreqDist('abbb') & FreqDist('bcc')
        FreqDist({'b': 1})

        """
        return self.__class__(super(FreqDist, self).__and__(other))

    def __le__(self, other):
        """
        Returns True if this frequency distribution is a subset of the other
        and for no key the value exceeds the value of the same key from
        the other frequency distribution.

        The <= operator forms partial order and satisfying the axioms
        reflexivity, antisymmetry and transitivity.

        >>> FreqDist('a') <= FreqDist('a')
        True
        >>> a = FreqDist('abc')
        >>> b = FreqDist('aabc')
        >>> (a <= b, b <= a)
        (True, False)
        >>> FreqDist('a') <= FreqDist('abcd')
        True
        >>> FreqDist('abc') <= FreqDist('xyz')
        False
        >>> FreqDist('xyz') <= FreqDist('abc')
        False
        >>> c = FreqDist('a')
        >>> d = FreqDist('aa')
        >>> e = FreqDist('aaa')
        >>> c <= d and d <= e and c <= e
        True
        """
        if not isinstance(other, FreqDist):
            raise_unorderable_types("<=", self, other)
        return set(self).issubset(other) and all(
            self[key] <= other[key] for key in self
        )

    def __ge__(self, other):
        # Mirror of __le__: every key of `other` must appear here with at
        # least as large a count.
        if not isinstance(other, FreqDist):
            raise_unorderable_types(">=", self, other)
        return set(self).issuperset(other) and all(
            self[key] >= other[key] for key in other
        )

    # Strict orders derived from the partial orders above.
    __lt__ = lambda self, other: self <= other and not self == other
    __gt__ = lambda self, other: self >= other and not self == other

    def __repr__(self):
        """
        Return a string representation of this FreqDist.

        :rtype: string
        """
        return self.pformat()

    def pprint(self, maxlen=10, stream=None):
        """
        Print a string representation of this FreqDist to 'stream'

        :param maxlen: The maximum number of items to print
        :type maxlen: int
        :param stream: The stream to print to. stdout by default
        """
        print(self.pformat(maxlen=maxlen), file=stream)

    def pformat(self, maxlen=10):
        """
        Return a string representation of this FreqDist.

        :param maxlen: The maximum number of items to display
        :type maxlen: int
        :rtype: string
        """
        items = ["{0!r}: {1!r}".format(*item) for item in self.most_common(maxlen)]
        if len(self) > maxlen:
            items.append("...")
        return "FreqDist({{{0}}})".format(", ".join(items))

    def __str__(self):
        """
        Return a string representation of this FreqDist.

        :rtype: string
        """
        return "<FreqDist with %d samples and %d outcomes>" % (len(self), self.N())

    def __iter__(self):
        """
        Return an iterator which yields tokens ordered by frequency.

        :rtype: iterator
        """
        for token, _ in self.most_common(self.B()):
            yield token
  391. ##//////////////////////////////////////////////////////
  392. ## Probability Distributions
  393. ##//////////////////////////////////////////////////////
class ProbDistI(metaclass=ABCMeta):
    """
    A probability distribution for the outcomes of an experiment.  A
    probability distribution specifies how likely it is that an
    experiment will have any given outcome.  For example, a
    probability distribution could be used to predict the probability
    that a token in a document will have a given type.  Formally, a
    probability distribution can be defined as a function mapping from
    samples to nonnegative real numbers, such that the sum of every
    number in the function's range is 1.0.  A ``ProbDist`` is often
    used to model the probability distribution of the experiment used
    to generate a frequency distribution.
    """

    SUM_TO_ONE = True
    """True if the probabilities of the samples in this probability
    distribution will always sum to one."""

    @abstractmethod
    def __init__(self):
        """
        Classes inheriting from ProbDistI should implement __init__.
        """

    @abstractmethod
    def prob(self, sample):
        """
        Return the probability for a given sample.  Probabilities
        are always real numbers in the range [0, 1].

        :param sample: The sample whose probability
            should be returned.
        :type sample: any
        :rtype: float
        """

    def logprob(self, sample):
        """
        Return the base 2 logarithm of the probability for a given sample.

        :param sample: The sample whose probability
            should be returned.
        :type sample: any
        :rtype: float
        """
        # Default definition, in terms of prob().  Zero probability maps to
        # the finite sentinel _NINF rather than raising a math domain error.
        p = self.prob(sample)
        return math.log(p, 2) if p != 0 else _NINF

    @abstractmethod
    def max(self):
        """
        Return the sample with the greatest probability.  If two or
        more samples have the same probability, return one of them;
        which sample is returned is undefined.

        :rtype: any
        """

    @abstractmethod
    def samples(self):
        """
        Return a list of all samples that have nonzero probabilities.
        Use ``prob`` to find the probability of each sample.

        :rtype: list
        """

    # cf self.SUM_TO_ONE
    def discount(self):
        """
        Return the ratio by which counts are discounted on average: c*/c

        :rtype: float
        """
        return 0.0

    # Subclasses should define more efficient implementations of this,
    # where possible.
    def generate(self):
        """
        Return a randomly selected sample from this probability distribution.
        The probability of returning each sample ``samp`` is equal to
        ``self.prob(samp)``.

        Implemented by inverse-transform sampling: draw p uniformly from
        [0, 1) and walk the samples, subtracting each probability, until
        the remainder drops to or below zero.
        """
        p = random.random()
        p_init = p
        for sample in self.samples():
            p -= self.prob(sample)
            if p <= 0:
                return sample
        # allow for some rounding error:
        # NOTE(review): this relies on the loop variable leaking out of the
        # for-loop; if samples() is empty this raises NameError.
        if p < 0.0001:
            return sample
        # we *should* never get here
        if self.SUM_TO_ONE:
            warnings.warn(
                "Probability distribution %r sums to %r; generate()"
                " is returning an arbitrary sample." % (self, p_init - p)
            )
        return random.choice(list(self.samples()))
  482. class UniformProbDist(ProbDistI):
  483. """
  484. A probability distribution that assigns equal probability to each
  485. sample in a given set; and a zero probability to all other
  486. samples.
  487. """
  488. def __init__(self, samples):
  489. """
  490. Construct a new uniform probability distribution, that assigns
  491. equal probability to each sample in ``samples``.
  492. :param samples: The samples that should be given uniform
  493. probability.
  494. :type samples: list
  495. :raise ValueError: If ``samples`` is empty.
  496. """
  497. if len(samples) == 0:
  498. raise ValueError(
  499. "A Uniform probability distribution must " + "have at least one sample."
  500. )
  501. self._sampleset = set(samples)
  502. self._prob = 1.0 / len(self._sampleset)
  503. self._samples = list(self._sampleset)
  504. def prob(self, sample):
  505. return self._prob if sample in self._sampleset else 0
  506. def max(self):
  507. return self._samples[0]
  508. def samples(self):
  509. return self._samples
  510. def __repr__(self):
  511. return "<UniformProbDist with %d samples>" % len(self._sampleset)
  512. class RandomProbDist(ProbDistI):
  513. """
  514. Generates a random probability distribution whereby each sample
  515. will be between 0 and 1 with equal probability (uniform random distribution.
  516. Also called a continuous uniform distribution).
  517. """
  518. def __init__(self, samples):
  519. if len(samples) == 0:
  520. raise ValueError(
  521. "A probability distribution must " + "have at least one sample."
  522. )
  523. self._probs = self.unirand(samples)
  524. self._samples = list(self._probs.keys())
  525. @classmethod
  526. def unirand(cls, samples):
  527. """
  528. The key function that creates a randomized initial distribution
  529. that still sums to 1. Set as a dictionary of prob values so that
  530. it can still be passed to MutableProbDist and called with identical
  531. syntax to UniformProbDist
  532. """
  533. samples = set(samples)
  534. randrow = [random.random() for i in range(len(samples))]
  535. total = sum(randrow)
  536. for i, x in enumerate(randrow):
  537. randrow[i] = x / total
  538. total = sum(randrow)
  539. if total != 1:
  540. # this difference, if present, is so small (near NINF) that it
  541. # can be subtracted from any element without risking probs not (0 1)
  542. randrow[-1] -= total - 1
  543. return dict((s, randrow[i]) for i, s in enumerate(samples))
  544. def max(self):
  545. if not hasattr(self, "_max"):
  546. self._max = max((p, v) for (v, p) in self._probs.items())[1]
  547. return self._max
  548. def prob(self, sample):
  549. return self._probs.get(sample, 0)
  550. def samples(self):
  551. return self._samples
  552. def __repr__(self):
  553. return "<RandomUniformProbDist with %d samples>" % len(self._probs)
  554. class DictionaryProbDist(ProbDistI):
  555. """
  556. A probability distribution whose probabilities are directly
  557. specified by a given dictionary. The given dictionary maps
  558. samples to probabilities.
  559. """
  560. def __init__(self, prob_dict=None, log=False, normalize=False):
  561. """
  562. Construct a new probability distribution from the given
  563. dictionary, which maps values to probabilities (or to log
  564. probabilities, if ``log`` is true). If ``normalize`` is
  565. true, then the probability values are scaled by a constant
  566. factor such that they sum to 1.
  567. If called without arguments, the resulting probability
  568. distribution assigns zero probability to all values.
  569. """
  570. self._prob_dict = prob_dict.copy() if prob_dict is not None else {}
  571. self._log = log
  572. # Normalize the distribution, if requested.
  573. if normalize:
  574. if len(prob_dict) == 0:
  575. raise ValueError(
  576. "A DictionaryProbDist must have at least one sample "
  577. + "before it can be normalized."
  578. )
  579. if log:
  580. value_sum = sum_logs(list(self._prob_dict.values()))
  581. if value_sum <= _NINF:
  582. logp = math.log(1.0 / len(prob_dict), 2)
  583. for x in prob_dict:
  584. self._prob_dict[x] = logp
  585. else:
  586. for (x, p) in self._prob_dict.items():
  587. self._prob_dict[x] -= value_sum
  588. else:
  589. value_sum = sum(self._prob_dict.values())
  590. if value_sum == 0:
  591. p = 1.0 / len(prob_dict)
  592. for x in prob_dict:
  593. self._prob_dict[x] = p
  594. else:
  595. norm_factor = 1.0 / value_sum
  596. for (x, p) in self._prob_dict.items():
  597. self._prob_dict[x] *= norm_factor
  598. def prob(self, sample):
  599. if self._log:
  600. return 2 ** (self._prob_dict[sample]) if sample in self._prob_dict else 0
  601. else:
  602. return self._prob_dict.get(sample, 0)
  603. def logprob(self, sample):
  604. if self._log:
  605. return self._prob_dict.get(sample, _NINF)
  606. else:
  607. if sample not in self._prob_dict:
  608. return _NINF
  609. elif self._prob_dict[sample] == 0:
  610. return _NINF
  611. else:
  612. return math.log(self._prob_dict[sample], 2)
  613. def max(self):
  614. if not hasattr(self, "_max"):
  615. self._max = max((p, v) for (v, p) in self._prob_dict.items())[1]
  616. return self._max
  617. def samples(self):
  618. return self._prob_dict.keys()
  619. def __repr__(self):
  620. return "<ProbDist with %d samples>" % len(self._prob_dict)
  621. class MLEProbDist(ProbDistI):
  622. """
  623. The maximum likelihood estimate for the probability distribution
  624. of the experiment used to generate a frequency distribution. The
  625. "maximum likelihood estimate" approximates the probability of
  626. each sample as the frequency of that sample in the frequency
  627. distribution.
  628. """
  629. def __init__(self, freqdist, bins=None):
  630. """
  631. Use the maximum likelihood estimate to create a probability
  632. distribution for the experiment used to generate ``freqdist``.
  633. :type freqdist: FreqDist
  634. :param freqdist: The frequency distribution that the
  635. probability estimates should be based on.
  636. """
  637. self._freqdist = freqdist
  638. def freqdist(self):
  639. """
  640. Return the frequency distribution that this probability
  641. distribution is based on.
  642. :rtype: FreqDist
  643. """
  644. return self._freqdist
  645. def prob(self, sample):
  646. return self._freqdist.freq(sample)
  647. def max(self):
  648. return self._freqdist.max()
  649. def samples(self):
  650. return self._freqdist.keys()
  651. def __repr__(self):
  652. """
  653. :rtype: str
  654. :return: A string representation of this ``ProbDist``.
  655. """
  656. return "<MLEProbDist based on %d samples>" % self._freqdist.N()
  657. class LidstoneProbDist(ProbDistI):
  658. """
  659. The Lidstone estimate for the probability distribution of the
  660. experiment used to generate a frequency distribution. The
  661. "Lidstone estimate" is parameterized by a real number *gamma*,
  662. which typically ranges from 0 to 1. The Lidstone estimate
  663. approximates the probability of a sample with count *c* from an
  664. experiment with *N* outcomes and *B* bins as
  665. ``c+gamma)/(N+B*gamma)``. This is equivalent to adding
  666. *gamma* to the count for each bin, and taking the maximum
  667. likelihood estimate of the resulting frequency distribution.
  668. """
  669. SUM_TO_ONE = False
  670. def __init__(self, freqdist, gamma, bins=None):
  671. """
  672. Use the Lidstone estimate to create a probability distribution
  673. for the experiment used to generate ``freqdist``.
  674. :type freqdist: FreqDist
  675. :param freqdist: The frequency distribution that the
  676. probability estimates should be based on.
  677. :type gamma: float
  678. :param gamma: A real number used to parameterize the
  679. estimate. The Lidstone estimate is equivalent to adding
  680. *gamma* to the count for each bin, and taking the
  681. maximum likelihood estimate of the resulting frequency
  682. distribution.
  683. :type bins: int
  684. :param bins: The number of sample values that can be generated
  685. by the experiment that is described by the probability
  686. distribution. This value must be correctly set for the
  687. probabilities of the sample values to sum to one. If
  688. ``bins`` is not specified, it defaults to ``freqdist.B()``.
  689. """
  690. if (bins == 0) or (bins is None and freqdist.N() == 0):
  691. name = self.__class__.__name__[:-8]
  692. raise ValueError(
  693. "A %s probability distribution " % name + "must have at least one bin."
  694. )
  695. if (bins is not None) and (bins < freqdist.B()):
  696. name = self.__class__.__name__[:-8]
  697. raise ValueError(
  698. "\nThe number of bins in a %s distribution " % name
  699. + "(%d) must be greater than or equal to\n" % bins
  700. + "the number of bins in the FreqDist used "
  701. + "to create it (%d)." % freqdist.B()
  702. )
  703. self._freqdist = freqdist
  704. self._gamma = float(gamma)
  705. self._N = self._freqdist.N()
  706. if bins is None:
  707. bins = freqdist.B()
  708. self._bins = bins
  709. self._divisor = self._N + bins * gamma
  710. if self._divisor == 0.0:
  711. # In extreme cases we force the probability to be 0,
  712. # which it will be, since the count will be 0:
  713. self._gamma = 0
  714. self._divisor = 1
  715. def freqdist(self):
  716. """
  717. Return the frequency distribution that this probability
  718. distribution is based on.
  719. :rtype: FreqDist
  720. """
  721. return self._freqdist
  722. def prob(self, sample):
  723. c = self._freqdist[sample]
  724. return (c + self._gamma) / self._divisor
  725. def max(self):
  726. # For Lidstone distributions, probability is monotonic with
  727. # frequency, so the most probable sample is the one that
  728. # occurs most frequently.
  729. return self._freqdist.max()
  730. def samples(self):
  731. return self._freqdist.keys()
  732. def discount(self):
  733. gb = self._gamma * self._bins
  734. return gb / (self._N + gb)
  735. def __repr__(self):
  736. """
  737. Return a string representation of this ``ProbDist``.
  738. :rtype: str
  739. """
  740. return "<LidstoneProbDist based on %d samples>" % self._freqdist.N()
  741. class LaplaceProbDist(LidstoneProbDist):
  742. """
  743. The Laplace estimate for the probability distribution of the
  744. experiment used to generate a frequency distribution. The
  745. "Laplace estimate" approximates the probability of a sample with
  746. count *c* from an experiment with *N* outcomes and *B* bins as
  747. *(c+1)/(N+B)*. This is equivalent to adding one to the count for
  748. each bin, and taking the maximum likelihood estimate of the
  749. resulting frequency distribution.
  750. """
  751. def __init__(self, freqdist, bins=None):
  752. """
  753. Use the Laplace estimate to create a probability distribution
  754. for the experiment used to generate ``freqdist``.
  755. :type freqdist: FreqDist
  756. :param freqdist: The frequency distribution that the
  757. probability estimates should be based on.
  758. :type bins: int
  759. :param bins: The number of sample values that can be generated
  760. by the experiment that is described by the probability
  761. distribution. This value must be correctly set for the
  762. probabilities of the sample values to sum to one. If
  763. ``bins`` is not specified, it defaults to ``freqdist.B()``.
  764. """
  765. LidstoneProbDist.__init__(self, freqdist, 1, bins)
  766. def __repr__(self):
  767. """
  768. :rtype: str
  769. :return: A string representation of this ``ProbDist``.
  770. """
  771. return "<LaplaceProbDist based on %d samples>" % self._freqdist.N()
  772. class ELEProbDist(LidstoneProbDist):
  773. """
  774. The expected likelihood estimate for the probability distribution
  775. of the experiment used to generate a frequency distribution. The
  776. "expected likelihood estimate" approximates the probability of a
  777. sample with count *c* from an experiment with *N* outcomes and
  778. *B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
  779. to the count for each bin, and taking the maximum likelihood
  780. estimate of the resulting frequency distribution.
  781. """
  782. def __init__(self, freqdist, bins=None):
  783. """
  784. Use the expected likelihood estimate to create a probability
  785. distribution for the experiment used to generate ``freqdist``.
  786. :type freqdist: FreqDist
  787. :param freqdist: The frequency distribution that the
  788. probability estimates should be based on.
  789. :type bins: int
  790. :param bins: The number of sample values that can be generated
  791. by the experiment that is described by the probability
  792. distribution. This value must be correctly set for the
  793. probabilities of the sample values to sum to one. If
  794. ``bins`` is not specified, it defaults to ``freqdist.B()``.
  795. """
  796. LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
  797. def __repr__(self):
  798. """
  799. Return a string representation of this ``ProbDist``.
  800. :rtype: str
  801. """
  802. return "<ELEProbDist based on %d samples>" % self._freqdist.N()
  803. class HeldoutProbDist(ProbDistI):
  804. """
  805. The heldout estimate for the probability distribution of the
  806. experiment used to generate two frequency distributions. These
  807. two frequency distributions are called the "heldout frequency
  808. distribution" and the "base frequency distribution." The
  809. "heldout estimate" uses uses the "heldout frequency
  810. distribution" to predict the probability of each sample, given its
  811. frequency in the "base frequency distribution".
  812. In particular, the heldout estimate approximates the probability
  813. for a sample that occurs *r* times in the base distribution as
  814. the average frequency in the heldout distribution of all samples
  815. that occur *r* times in the base distribution.
  816. This average frequency is *Tr[r]/(Nr[r].N)*, where:
  817. - *Tr[r]* is the total count in the heldout distribution for
  818. all samples that occur *r* times in the base distribution.
  819. - *Nr[r]* is the number of samples that occur *r* times in
  820. the base distribution.
  821. - *N* is the number of outcomes recorded by the heldout
  822. frequency distribution.
  823. In order to increase the efficiency of the ``prob`` member
  824. function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
  825. when the ``HeldoutProbDist`` is created.
  826. :type _estimate: list(float)
  827. :ivar _estimate: A list mapping from *r*, the number of
  828. times that a sample occurs in the base distribution, to the
  829. probability estimate for that sample. ``_estimate[r]`` is
  830. calculated by finding the average frequency in the heldout
  831. distribution of all samples that occur *r* times in the base
  832. distribution. In particular, ``_estimate[r]`` =
  833. *Tr[r]/(Nr[r].N)*.
  834. :type _max_r: int
  835. :ivar _max_r: The maximum number of times that any sample occurs
  836. in the base distribution. ``_max_r`` is used to decide how
  837. large ``_estimate`` must be.
  838. """
  839. SUM_TO_ONE = False
  840. def __init__(self, base_fdist, heldout_fdist, bins=None):
  841. """
  842. Use the heldout estimate to create a probability distribution
  843. for the experiment used to generate ``base_fdist`` and
  844. ``heldout_fdist``.
  845. :type base_fdist: FreqDist
  846. :param base_fdist: The base frequency distribution.
  847. :type heldout_fdist: FreqDist
  848. :param heldout_fdist: The heldout frequency distribution.
  849. :type bins: int
  850. :param bins: The number of sample values that can be generated
  851. by the experiment that is described by the probability
  852. distribution. This value must be correctly set for the
  853. probabilities of the sample values to sum to one. If
  854. ``bins`` is not specified, it defaults to ``freqdist.B()``.
  855. """
  856. self._base_fdist = base_fdist
  857. self._heldout_fdist = heldout_fdist
  858. # The max number of times any sample occurs in base_fdist.
  859. self._max_r = base_fdist[base_fdist.max()]
  860. # Calculate Tr, Nr, and N.
  861. Tr = self._calculate_Tr()
  862. r_Nr = base_fdist.r_Nr(bins)
  863. Nr = [r_Nr[r] for r in range(self._max_r + 1)]
  864. N = heldout_fdist.N()
  865. # Use Tr, Nr, and N to compute the probability estimate for
  866. # each value of r.
  867. self._estimate = self._calculate_estimate(Tr, Nr, N)
  868. def _calculate_Tr(self):
  869. """
  870. Return the list *Tr*, where *Tr[r]* is the total count in
  871. ``heldout_fdist`` for all samples that occur *r*
  872. times in ``base_fdist``.
  873. :rtype: list(float)
  874. """
  875. Tr = [0.0] * (self._max_r + 1)
  876. for sample in self._heldout_fdist:
  877. r = self._base_fdist[sample]
  878. Tr[r] += self._heldout_fdist[sample]
  879. return Tr
  880. def _calculate_estimate(self, Tr, Nr, N):
  881. """
  882. Return the list *estimate*, where *estimate[r]* is the probability
  883. estimate for any sample that occurs *r* times in the base frequency
  884. distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*.
  885. In the special case that *N[r]=0*, *estimate[r]* will never be used;
  886. so we define *estimate[r]=None* for those cases.
  887. :rtype: list(float)
  888. :type Tr: list(float)
  889. :param Tr: the list *Tr*, where *Tr[r]* is the total count in
  890. the heldout distribution for all samples that occur *r*
  891. times in base distribution.
  892. :type Nr: list(float)
  893. :param Nr: The list *Nr*, where *Nr[r]* is the number of
  894. samples that occur *r* times in the base distribution.
  895. :type N: int
  896. :param N: The total number of outcomes recorded by the heldout
  897. frequency distribution.
  898. """
  899. estimate = []
  900. for r in range(self._max_r + 1):
  901. if Nr[r] == 0:
  902. estimate.append(None)
  903. else:
  904. estimate.append(Tr[r] / (Nr[r] * N))
  905. return estimate
  906. def base_fdist(self):
  907. """
  908. Return the base frequency distribution that this probability
  909. distribution is based on.
  910. :rtype: FreqDist
  911. """
  912. return self._base_fdist
  913. def heldout_fdist(self):
  914. """
  915. Return the heldout frequency distribution that this
  916. probability distribution is based on.
  917. :rtype: FreqDist
  918. """
  919. return self._heldout_fdist
  920. def samples(self):
  921. return self._base_fdist.keys()
  922. def prob(self, sample):
  923. # Use our precomputed probability estimate.
  924. r = self._base_fdist[sample]
  925. return self._estimate[r]
  926. def max(self):
  927. # Note: the Heldout estimation is *not* necessarily monotonic;
  928. # so this implementation is currently broken. However, it
  929. # should give the right answer *most* of the time. :)
  930. return self._base_fdist.max()
  931. def discount(self):
  932. raise NotImplementedError()
  933. def __repr__(self):
  934. """
  935. :rtype: str
  936. :return: A string representation of this ``ProbDist``.
  937. """
  938. s = "<HeldoutProbDist: %d base samples; %d heldout samples>"
  939. return s % (self._base_fdist.N(), self._heldout_fdist.N())
  940. class CrossValidationProbDist(ProbDistI):
  941. """
  942. The cross-validation estimate for the probability distribution of
  943. the experiment used to generate a set of frequency distribution.
  944. The "cross-validation estimate" for the probability of a sample
  945. is found by averaging the held-out estimates for the sample in
  946. each pair of frequency distributions.
  947. """
  948. SUM_TO_ONE = False
  949. def __init__(self, freqdists, bins):
  950. """
  951. Use the cross-validation estimate to create a probability
  952. distribution for the experiment used to generate
  953. ``freqdists``.
  954. :type freqdists: list(FreqDist)
  955. :param freqdists: A list of the frequency distributions
  956. generated by the experiment.
  957. :type bins: int
  958. :param bins: The number of sample values that can be generated
  959. by the experiment that is described by the probability
  960. distribution. This value must be correctly set for the
  961. probabilities of the sample values to sum to one. If
  962. ``bins`` is not specified, it defaults to ``freqdist.B()``.
  963. """
  964. self._freqdists = freqdists
  965. # Create a heldout probability distribution for each pair of
  966. # frequency distributions in freqdists.
  967. self._heldout_probdists = []
  968. for fdist1 in freqdists:
  969. for fdist2 in freqdists:
  970. if fdist1 is not fdist2:
  971. probdist = HeldoutProbDist(fdist1, fdist2, bins)
  972. self._heldout_probdists.append(probdist)
  973. def freqdists(self):
  974. """
  975. Return the list of frequency distributions that this ``ProbDist`` is based on.
  976. :rtype: list(FreqDist)
  977. """
  978. return self._freqdists
  979. def samples(self):
  980. # [xx] nb: this is not too efficient
  981. return set(sum([list(fd) for fd in self._freqdists], []))
  982. def prob(self, sample):
  983. # Find the average probability estimate returned by each
  984. # heldout distribution.
  985. prob = 0.0
  986. for heldout_probdist in self._heldout_probdists:
  987. prob += heldout_probdist.prob(sample)
  988. return prob / len(self._heldout_probdists)
  989. def discount(self):
  990. raise NotImplementedError()
  991. def __repr__(self):
  992. """
  993. Return a string representation of this ``ProbDist``.
  994. :rtype: str
  995. """
  996. return "<CrossValidationProbDist: %d-way>" % len(self._freqdists)
  997. class WittenBellProbDist(ProbDistI):
  998. """
  999. The Witten-Bell estimate of a probability distribution. This distribution
  1000. allocates uniform probability mass to as yet unseen events by using the
  1001. number of events that have only been seen once. The probability mass
  1002. reserved for unseen events is equal to *T / (N + T)*
  1003. where *T* is the number of observed event types and *N* is the total
  1004. number of observed events. This equates to the maximum likelihood estimate
  1005. of a new type event occurring. The remaining probability mass is discounted
  1006. such that all probability estimates sum to one, yielding:
  1007. - *p = T / Z (N + T)*, if count = 0
  1008. - *p = c / (N + T)*, otherwise
  1009. """
  1010. def __init__(self, freqdist, bins=None):
  1011. """
  1012. Creates a distribution of Witten-Bell probability estimates. This
  1013. distribution allocates uniform probability mass to as yet unseen
  1014. events by using the number of events that have only been seen once. The
  1015. probability mass reserved for unseen events is equal to *T / (N + T)*
  1016. where *T* is the number of observed event types and *N* is the total
  1017. number of observed events. This equates to the maximum likelihood
  1018. estimate of a new type event occurring. The remaining probability mass
  1019. is discounted such that all probability estimates sum to one,
  1020. yielding:
  1021. - *p = T / Z (N + T)*, if count = 0
  1022. - *p = c / (N + T)*, otherwise
  1023. The parameters *T* and *N* are taken from the ``freqdist`` parameter
  1024. (the ``B()`` and ``N()`` values). The normalizing factor *Z* is
  1025. calculated using these values along with the ``bins`` parameter.
  1026. :param freqdist: The frequency counts upon which to base the
  1027. estimation.
  1028. :type freqdist: FreqDist
  1029. :param bins: The number of possible event types. This must be at least
  1030. as large as the number of bins in the ``freqdist``. If None, then
  1031. it's assumed to be equal to that of the ``freqdist``
  1032. :type bins: int
  1033. """
  1034. assert bins is None or bins >= freqdist.B(), (
  1035. "bins parameter must not be less than %d=freqdist.B()" % freqdist.B()
  1036. )
  1037. if bins is None:
  1038. bins = freqdist.B()
  1039. self._freqdist = freqdist
  1040. self._T = self._freqdist.B()
  1041. self._Z = bins - self._freqdist.B()
  1042. self._N = self._freqdist.N()
  1043. # self._P0 is P(0), precalculated for efficiency:
  1044. if self._N == 0:
  1045. # if freqdist is empty, we approximate P(0) by a UniformProbDist:
  1046. self._P0 = 1.0 / self._Z
  1047. else:
  1048. self._P0 = self._T / (self._Z * (self._N + self._T))
  1049. def prob(self, sample):
  1050. # inherit docs from ProbDistI
  1051. c = self._freqdist[sample]
  1052. return c / (self._N + self._T) if c != 0 else self._P0
  1053. def max(self):
  1054. return self._freqdist.max()
  1055. def samples(self):
  1056. return self._freqdist.keys()
  1057. def freqdist(self):
  1058. return self._freqdist
  1059. def discount(self):
  1060. raise NotImplementedError()
  1061. def __repr__(self):
  1062. """
  1063. Return a string representation of this ``ProbDist``.
  1064. :rtype: str
  1065. """
  1066. return "<WittenBellProbDist based on %d samples>" % self._freqdist.N()
  1067. ##//////////////////////////////////////////////////////
  1068. ## Good-Turing Probability Distributions
  1069. ##//////////////////////////////////////////////////////
  1070. # Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration in
# World War II. It is a statistical technique for predicting the
  1073. # probability of occurrence of objects belonging to an unknown number
  1074. # of species, given past observations of such objects and their
  1075. # species. (In drawing balls from an urn, the 'objects' would be balls
  1076. # and the 'species' would be the distinct colors of the balls (finite
  1077. # but unknown in number).
  1078. #
  1079. # Good-Turing method calculates the probability mass to assign to
  1080. # events with zero or low counts based on the number of events with
  1081. # higher counts. It does so by using the adjusted count *c\**:
  1082. #
  1083. # - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
  1084. # - *things with frequency zero in training* = N(1) for c == 0
  1085. #
  1086. # where *c* is the original count, *N(i)* is the number of event types
# observed with count *i*. We can think of the count of the unseen as the count
  1088. # of frequency one (see Jurafsky & Martin 2nd Edition, p101).
  1089. #
  1090. # This method is problematic because the situation ``N(c+1) == 0``
  1091. # is quite common in the original Good-Turing estimation; smoothing or
  1092. # interpolation of *N(i)* values is essential in practice.
  1093. #
  1094. # Bill Gale and Geoffrey Sampson present a simple and effective approach,
  1095. # Simple Good-Turing. As a smoothing curve they simply use a power curve:
  1096. #
  1097. # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
  1098. # relationship)
  1099. #
  1100. # They estimate a and b by simple linear regression technique on the
  1101. # logarithmic form of the equation:
  1102. #
  1103. # log Nr = a + b*log(r)
  1104. #
  1105. # However, they suggest that such a simple curve is probably only
  1106. # appropriate for high values of r. For low values of r, they use the
  1107. # measured Nr directly. (see M&S, p.213)
  1108. #
# Gale and Sampson propose to use r while the difference between r and
# r* is greater than 1.96 times the standard deviation, and to switch
# to r* if it is less or equal:
  1112. #
  1113. # |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
  1114. #
# The 1.96 coefficient corresponds to a 0.05 significance criterion,
  1116. # some implementations can use a coefficient of 1.65 for a 0.1
  1117. # significance criterion.
  1118. #
  1119. ##//////////////////////////////////////////////////////
## Simple Good-Turing Probability Distributions
  1121. ##//////////////////////////////////////////////////////
  1122. class SimpleGoodTuringProbDist(ProbDistI):
  1123. """
  1124. SimpleGoodTuring ProbDist approximates from frequency to frequency of
  1125. frequency into a linear line under log space by linear regression.
  1126. Details of Simple Good-Turing algorithm can be found in:
  1127. - Good Turing smoothing without tears" (Gale & Sampson 1995),
  1128. Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
  1129. - "Speech and Language Processing (Jurafsky & Martin),
  1130. 2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
  1131. - http://www.grsampson.net/RGoodTur.html
  1132. Given a set of pair (xi, yi), where the xi denotes the frequency and
  1133. yi denotes the frequency of frequency, we want to minimize their
  1134. square variation. E(x) and E(y) represent the mean of xi and yi.
  1135. - slope: b = sigma ((xi-E(x)(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
  1136. - intercept: a = E(y) - b.E(x)
  1137. """
  1138. SUM_TO_ONE = False
  1139. def __init__(self, freqdist, bins=None):
  1140. """
  1141. :param freqdist: The frequency counts upon which to base the
  1142. estimation.
  1143. :type freqdist: FreqDist
  1144. :param bins: The number of possible event types. This must be
  1145. larger than the number of bins in the ``freqdist``. If None,
  1146. then it's assumed to be equal to ``freqdist``.B() + 1
  1147. :type bins: int
  1148. """
  1149. assert (
  1150. bins is None or bins > freqdist.B()
  1151. ), "bins parameter must not be less than %d=freqdist.B()+1" % (freqdist.B() + 1)
  1152. if bins is None:
  1153. bins = freqdist.B() + 1
  1154. self._freqdist = freqdist
  1155. self._bins = bins
  1156. r, nr = self._r_Nr()
  1157. self.find_best_fit(r, nr)
  1158. self._switch(r, nr)
  1159. self._renormalize(r, nr)
  1160. def _r_Nr_non_zero(self):
  1161. r_Nr = self._freqdist.r_Nr()
  1162. del r_Nr[0]
  1163. return r_Nr
  1164. def _r_Nr(self):
  1165. """
  1166. Split the frequency distribution in two list (r, Nr), where Nr(r) > 0
  1167. """
  1168. nonzero = self._r_Nr_non_zero()
  1169. if not nonzero:
  1170. return [], []
  1171. return zip(*sorted(nonzero.items()))
  1172. def find_best_fit(self, r, nr):
  1173. """
  1174. Use simple linear regression to tune parameters self._slope and
  1175. self._intercept in the log-log space based on count and Nr(count)
  1176. (Work in log space to avoid floating point underflow.)
  1177. """
  1178. # For higher sample frequencies the data points becomes horizontal
  1179. # along line Nr=1. To create a more evident linear model in log-log
  1180. # space, we average positive Nr values with the surrounding zero
  1181. # values. (Church and Gale, 1991)
  1182. if not r or not nr:
  1183. # Empty r or nr?
  1184. return
  1185. zr = []
  1186. for j in range(len(r)):
  1187. i = r[j - 1] if j > 0 else 0
  1188. k = 2 * r[j] - i if j == len(r) - 1 else r[j + 1]
  1189. zr_ = 2.0 * nr[j] / (k - i)
  1190. zr.append(zr_)
  1191. log_r = [math.log(i) for i in r]
  1192. log_zr = [math.log(i) for i in zr]
  1193. xy_cov = x_var = 0.0
  1194. x_mean = sum(log_r) / len(log_r)
  1195. y_mean = sum(log_zr) / len(log_zr)
  1196. for (x, y) in zip(log_r, log_zr):
  1197. xy_cov += (x - x_mean) * (y - y_mean)
  1198. x_var += (x - x_mean) ** 2
  1199. self._slope = xy_cov / x_var if x_var != 0 else 0.0
  1200. if self._slope >= -1:
  1201. warnings.warn(
  1202. "SimpleGoodTuring did not find a proper best fit "
  1203. "line for smoothing probabilities of occurrences. "
  1204. "The probability estimates are likely to be "
  1205. "unreliable."
  1206. )
  1207. self._intercept = y_mean - self._slope * x_mean
  1208. def _switch(self, r, nr):
  1209. """
  1210. Calculate the r frontier where we must switch from Nr to Sr
  1211. when estimating E[Nr].
  1212. """
  1213. for i, r_ in enumerate(r):
  1214. if len(r) == i + 1 or r[i + 1] != r_ + 1:
  1215. # We are at the end of r, or there is a gap in r
  1216. self._switch_at = r_
  1217. break
  1218. Sr = self.smoothedNr
  1219. smooth_r_star = (r_ + 1) * Sr(r_ + 1) / Sr(r_)
  1220. unsmooth_r_star = (r_ + 1) * nr[i + 1] / nr[i]
  1221. std = math.sqrt(self._variance(r_, nr[i], nr[i + 1]))
  1222. if abs(unsmooth_r_star - smooth_r_star) <= 1.96 * std:
  1223. self._switch_at = r_
  1224. break
  1225. def _variance(self, r, nr, nr_1):
  1226. r = float(r)
  1227. nr = float(nr)
  1228. nr_1 = float(nr_1)
  1229. return (r + 1.0) ** 2 * (nr_1 / nr ** 2) * (1.0 + nr_1 / nr)
  1230. def _renormalize(self, r, nr):
  1231. """
  1232. It is necessary to renormalize all the probability estimates to
  1233. ensure a proper probability distribution results. This can be done
  1234. by keeping the estimate of the probability mass for unseen items as
  1235. N(1)/N and renormalizing all the estimates for previously seen items
  1236. (as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
  1237. """
  1238. prob_cov = 0.0
  1239. for r_, nr_ in zip(r, nr):
  1240. prob_cov += nr_ * self._prob_measure(r_)
  1241. if prob_cov:
  1242. self._renormal = (1 - self._prob_measure(0)) / prob_cov
  1243. def smoothedNr(self, r):
  1244. """
  1245. Return the number of samples with count r.
  1246. :param r: The amount of frequency.
  1247. :type r: int
  1248. :rtype: float
  1249. """
  1250. # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
  1251. # relationship)
  1252. # Estimate a and b by simple linear regression technique on
  1253. # the logarithmic form of the equation: log Nr = a + b*log(r)
  1254. return math.exp(self._intercept + self._slope * math.log(r))
  1255. def prob(self, sample):
  1256. """
  1257. Return the sample's probability.
  1258. :param sample: sample of the event
  1259. :type sample: str
  1260. :rtype: float
  1261. """
  1262. count = self._freqdist[sample]
  1263. p = self._prob_measure(count)
  1264. if count == 0:
  1265. if self._bins == self._freqdist.B():
  1266. p = 0.0
  1267. else:
  1268. p = p / (self._bins - self._freqdist.B())
  1269. else:
  1270. p = p * self._renormal
  1271. return p
  1272. def _prob_measure(self, count):
  1273. if count == 0 and self._freqdist.N() == 0:
  1274. return 1.0
  1275. elif count == 0 and self._freqdist.N() != 0:
  1276. return self._freqdist.Nr(1) / self._freqdist.N()
  1277. if self._switch_at > count:
  1278. Er_1 = self._freqdist.Nr(count + 1)
  1279. Er = self._freqdist.Nr(count)
  1280. else:
  1281. Er_1 = self.smoothedNr(count + 1)
  1282. Er = self.smoothedNr(count)
  1283. r_star = (count + 1) * Er_1 / Er
  1284. return r_star / self._freqdist.N()
  1285. def check(self):
  1286. prob_sum = 0.0
  1287. for i in range(0, len(self._Nr)):
  1288. prob_sum += self._Nr[i] * self._prob_measure(i) / self._renormal
  1289. print("Probability Sum:", prob_sum)
  1290. # assert prob_sum != 1.0, "probability sum should be one!"
  1291. def discount(self):
  1292. """
  1293. This function returns the total mass of probability transfers from the
  1294. seen samples to the unseen samples.
  1295. """
  1296. return self.smoothedNr(1) / self._freqdist.N()
  1297. def max(self):
  1298. return self._freqdist.max()
  1299. def samples(self):
  1300. return self._freqdist.keys()
  1301. def freqdist(self):
  1302. return self._freqdist
  1303. def __repr__(self):
  1304. """
  1305. Return a string representation of this ``ProbDist``.
  1306. :rtype: str
  1307. """
  1308. return "<SimpleGoodTuringProbDist based on %d samples>" % self._freqdist.N()
  1309. class MutableProbDist(ProbDistI):
  1310. """
A mutable probdist where the probabilities may be easily modified. This
  1312. simply copies an existing probdist, storing the probability values in a
  1313. mutable dictionary and providing an update method.
  1314. """
  1315. def __init__(self, prob_dist, samples, store_logs=True):
  1316. """
  1317. Creates the mutable probdist based on the given prob_dist and using
  1318. the list of samples given. These values are stored as log
  1319. probabilities if the store_logs flag is set.
  1320. :param prob_dist: the distribution from which to garner the
  1321. probabilities
  1322. :type prob_dist: ProbDist
  1323. :param samples: the complete set of samples
  1324. :type samples: sequence of any
  1325. :param store_logs: whether to store the probabilities as logarithms
  1326. :type store_logs: bool
  1327. """
  1328. self._samples = samples
  1329. self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
  1330. self._data = array.array(str("d"), [0.0]) * len(samples)
  1331. for i in range(len(samples)):
  1332. if store_logs:
  1333. self._data[i] = prob_dist.logprob(samples[i])
  1334. else:
  1335. self._data[i] = prob_dist.prob(samples[i])
  1336. self._logs = store_logs
  1337. def max(self):
  1338. # inherit documentation
  1339. return max((p, v) for (v, p) in self._sample_dict.items())[1]
  1340. def samples(self):
  1341. # inherit documentation
  1342. return self._samples
  1343. def prob(self, sample):
  1344. # inherit documentation
  1345. i = self._sample_dict.get(sample)
  1346. if i is None:
  1347. return 0.0
  1348. return 2 ** (self._data[i]) if self._logs else self._data[i]
  1349. def logprob(self, sample):
  1350. # inherit documentation
  1351. i = self._sample_dict.get(sample)
  1352. if i is None:
  1353. return float("-inf")
  1354. return self._data[i] if self._logs else math.log(self._data[i], 2)
  1355. def update(self, sample, prob, log=True):
  1356. """
  1357. Update the probability for the given sample. This may cause the object
  1358. to stop being the valid probability distribution - the user must
  1359. ensure that they update the sample probabilities such that all samples
  1360. have probabilities between 0 and 1 and that all probabilities sum to
  1361. one.
  1362. :param sample: the sample for which to update the probability
  1363. :type sample: any
  1364. :param prob: the new probability
  1365. :type prob: float
  1366. :param log: is the probability already logged
  1367. :type log: bool
  1368. """
  1369. i = self._sample_dict.get(sample)
  1370. assert i is not None
  1371. if self._logs:
  1372. self._data[i] = prob if log else math.log(prob, 2)
  1373. else:
  1374. self._data[i] = 2 ** (prob) if log else prob
  1375. ##/////////////////////////////////////////////////////
  1376. ## Kneser-Ney Probability Distribution
  1377. ##//////////////////////////////////////////////////////
  1378. # This method for calculating probabilities was introduced in 1995 by Reinhard
  1379. # Kneser and Hermann Ney. It was meant to improve the accuracy of language
  1380. # models that use backing-off to deal with sparse data. The authors propose two
  1381. # ways of doing so: a marginal distribution constraint on the back-off
  1382. # distribution and a leave-one-out distribution. For a start, the first one is
  1383. # implemented as a class below.
  1384. #
  1385. # The idea behind a back-off n-gram model is that we have a series of
  1386. # frequency distributions for our n-grams so that in case we have not seen a
  1387. # given n-gram during training (and as a result have a 0 probability for it) we
  1388. # can 'back off' (hence the name!) and try testing whether we've seen the
  1389. # n-1-gram part of the n-gram in training.
  1390. #
  1391. # The novelty of Kneser and Ney's approach was that they decided to fiddle
  1392. # around with the way this latter, backed off probability was being calculated
  1393. # whereas their peers seemed to focus on the primary probability.
  1394. #
  1395. # The implementation below uses one of the techniques described in their paper
  1396. # titled "Improved backing-off for n-gram language modeling." In the same paper
  1397. # another technique is introduced to attempt to smooth the back-off
  1398. # distribution as well as the primary one. There is also a much-cited
  1399. # modification of this method proposed by Chen and Goodman.
  1400. #
  1401. # In order for the implementation of Kneser-Ney to be more efficient, some
  1402. # changes have been made to the original algorithm. Namely, the calculation of
  1403. # the normalizing function gamma has been significantly simplified and
  1404. # combined slightly differently with beta. None of these changes affect the
  1405. # nature of the algorithm, but instead aim to cut out unnecessary calculations
  1406. # and take advantage of storing and retrieving information in dictionaries
  1407. # where possible.
  1408. class KneserNeyProbDist(ProbDistI):
  1409. """
  1410. Kneser-Ney estimate of a probability distribution. This is a version of
  1411. back-off that counts how likely an n-gram is provided the n-1-gram had
  1412. been seen in training. Extends the ProbDistI interface, requires a trigram
  1413. FreqDist instance to train on. Optionally, a different from default discount
  1414. value can be specified. The default discount is set to 0.75.
  1415. """
  1416. def __init__(self, freqdist, bins=None, discount=0.75):
  1417. """
  1418. :param freqdist: The trigram frequency distribution upon which to base
  1419. the estimation
  1420. :type freqdist: FreqDist
  1421. :param bins: Included for compatibility with nltk.tag.hmm
  1422. :type bins: int or float
  1423. :param discount: The discount applied when retrieving counts of
  1424. trigrams
  1425. :type discount: float (preferred, but can be set to int)
  1426. """
  1427. if not bins:
  1428. self._bins = freqdist.B()
  1429. else:
  1430. self._bins = bins
  1431. self._D = discount
  1432. # cache for probability calculation
  1433. self._cache = {}
  1434. # internal bigram and trigram frequency distributions
  1435. self._bigrams = defaultdict(int)
  1436. self._trigrams = freqdist
  1437. # helper dictionaries used to calculate probabilities
  1438. self._wordtypes_after = defaultdict(float)
  1439. self._trigrams_contain = defaultdict(float)
  1440. self._wordtypes_before = defaultdict(float)
  1441. for w0, w1, w2 in freqdist:
  1442. self._bigrams[(w0, w1)] += freqdist[(w0, w1, w2)]
  1443. self._wordtypes_after[(w0, w1)] += 1
  1444. self._trigrams_contain[w1] += 1
  1445. self._wordtypes_before[(w1, w2)] += 1
  1446. def prob(self, trigram):
  1447. # sample must be a triple
  1448. if len(trigram) != 3:
  1449. raise ValueError("Expected an iterable with 3 members.")
  1450. trigram = tuple(trigram)
  1451. w0, w1, w2 = trigram
  1452. if trigram in self._cache:
  1453. return self._cache[trigram]
  1454. else:
  1455. # if the sample trigram was seen during training
  1456. if trigram in self._trigrams:
  1457. prob = (self._trigrams[trigram] - self.discount()) / self._bigrams[
  1458. (w0, w1)
  1459. ]
  1460. # else if the 'rougher' environment was seen during training
  1461. elif (w0, w1) in self._bigrams and (w1, w2) in self._wordtypes_before:
  1462. aftr = self._wordtypes_after[(w0, w1)]
  1463. bfr = self._wordtypes_before[(w1, w2)]
  1464. # the probability left over from alphas
  1465. leftover_prob = (aftr * self.discount()) / self._bigrams[(w0, w1)]
  1466. # the beta (including normalization)
  1467. beta = bfr / (self._trigrams_contain[w1] - aftr)
  1468. prob = leftover_prob * beta
  1469. # else the sample was completely unseen during training
  1470. else:
  1471. prob = 0.0
  1472. self._cache[trigram] = prob
  1473. return prob
  1474. def discount(self):
  1475. """
  1476. Return the value by which counts are discounted. By default set to 0.75.
  1477. :rtype: float
  1478. """
  1479. return self._D
  1480. def set_discount(self, discount):
  1481. """
  1482. Set the value by which counts are discounted to the value of discount.
  1483. :param discount: the new value to discount counts by
  1484. :type discount: float (preferred, but int possible)
  1485. :rtype: None
  1486. """
  1487. self._D = discount
  1488. def samples(self):
  1489. return self._trigrams.keys()
  1490. def max(self):
  1491. return self._trigrams.max()
  1492. def __repr__(self):
  1493. """
  1494. Return a string representation of this ProbDist
  1495. :rtype: str
  1496. """
  1497. return "<KneserNeyProbDist based on {0} trigrams".format(self._trigrams.N())
  1498. ##//////////////////////////////////////////////////////
  1499. ## Probability Distribution Operations
  1500. ##//////////////////////////////////////////////////////
  1501. def log_likelihood(test_pdist, actual_pdist):
  1502. if not isinstance(test_pdist, ProbDistI) or not isinstance(actual_pdist, ProbDistI):
  1503. raise ValueError("expected a ProbDist.")
  1504. # Is this right?
  1505. return sum(
  1506. actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2) for s in actual_pdist
  1507. )
  1508. def entropy(pdist):
  1509. probs = (pdist.prob(s) for s in pdist.samples())
  1510. return -sum(p * math.log(p, 2) for p in probs)
  1511. ##//////////////////////////////////////////////////////
  1512. ## Conditional Distributions
  1513. ##//////////////////////////////////////////////////////
  1514. class ConditionalFreqDist(defaultdict):
  1515. """
  1516. A collection of frequency distributions for a single experiment
  1517. run under different conditions. Conditional frequency
  1518. distributions are used to record the number of times each sample
  1519. occurred, given the condition under which the experiment was run.
  1520. For example, a conditional frequency distribution could be used to
  1521. record the frequency of each word (type) in a document, given its
  1522. length. Formally, a conditional frequency distribution can be
  1523. defined as a function that maps from each condition to the
  1524. FreqDist for the experiment under that condition.
  1525. Conditional frequency distributions are typically constructed by
  1526. repeatedly running an experiment under a variety of conditions,
  1527. and incrementing the sample outcome counts for the appropriate
  1528. conditions. For example, the following code will produce a
  1529. conditional frequency distribution that encodes how often each
  1530. word type occurs, given the length of that word type:
  1531. >>> from nltk.probability import ConditionalFreqDist
  1532. >>> from nltk.tokenize import word_tokenize
  1533. >>> sent = "the the the dog dog some other words that we do not care about"
  1534. >>> cfdist = ConditionalFreqDist()
  1535. >>> for word in word_tokenize(sent):
  1536. ... condition = len(word)
  1537. ... cfdist[condition][word] += 1
  1538. An equivalent way to do this is with the initializer:
  1539. >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
  1540. The frequency distribution for each condition is accessed using
  1541. the indexing operator:
  1542. >>> cfdist[3]
  1543. FreqDist({'the': 3, 'dog': 2, 'not': 1})
  1544. >>> cfdist[3].freq('the')
  1545. 0.5
  1546. >>> cfdist[3]['dog']
  1547. 2
  1548. When the indexing operator is used to access the frequency
  1549. distribution for a condition that has not been accessed before,
  1550. ``ConditionalFreqDist`` creates a new empty FreqDist for that
  1551. condition.
  1552. """
  1553. def __init__(self, cond_samples=None):
  1554. """
  1555. Construct a new empty conditional frequency distribution. In
  1556. particular, the count for every sample, under every condition,
  1557. is zero.
  1558. :param cond_samples: The samples to initialize the conditional
  1559. frequency distribution with
  1560. :type cond_samples: Sequence of (condition, sample) tuples
  1561. """
  1562. defaultdict.__init__(self, FreqDist)
  1563. if cond_samples:
  1564. for (cond, sample) in cond_samples:
  1565. self[cond][sample] += 1
  1566. def __reduce__(self):
  1567. kv_pairs = ((cond, self[cond]) for cond in self.conditions())
  1568. return (self.__class__, (), None, None, kv_pairs)
  1569. def conditions(self):
  1570. """
  1571. Return a list of the conditions that have been accessed for
  1572. this ``ConditionalFreqDist``. Use the indexing operator to
  1573. access the frequency distribution for a given condition.
  1574. Note that the frequency distributions for some conditions
  1575. may contain zero sample outcomes.
  1576. :rtype: list
  1577. """
  1578. return list(self.keys())
  1579. def N(self):
  1580. """
  1581. Return the total number of sample outcomes that have been
  1582. recorded by this ``ConditionalFreqDist``.
  1583. :rtype: int
  1584. """
  1585. return sum(fdist.N() for fdist in self.values())
  1586. def plot(self, *args, **kwargs):
  1587. """
  1588. Plot the given samples from the conditional frequency distribution.
  1589. For a cumulative plot, specify cumulative=True.
  1590. (Requires Matplotlib to be installed.)
  1591. :param samples: The samples to plot
  1592. :type samples: list
  1593. :param title: The title for the graph
  1594. :type title: str
  1595. :param conditions: The conditions to plot (default is all)
  1596. :type conditions: list
  1597. """
  1598. try:
  1599. import matplotlib.pyplot as plt #import statment fix
  1600. except ImportError:
  1601. raise ValueError(
  1602. "The plot function requires matplotlib to be installed."
  1603. "See http://matplotlib.org/"
  1604. )
  1605. cumulative = _get_kwarg(kwargs, 'cumulative', False)
  1606. percents = _get_kwarg(kwargs, 'percents', False)
  1607. conditions = [c for c in _get_kwarg(kwargs, 'conditions', self.conditions()) if c in self] # conditions should be in self
  1608. title = _get_kwarg(kwargs, 'title', '')
  1609. samples = _get_kwarg(
  1610. kwargs, 'samples', sorted(set(v
  1611. for c in conditions
  1612. for v in self[c]))
  1613. ) # this computation could be wasted
  1614. if "linewidth" not in kwargs:
  1615. kwargs["linewidth"] = 2
  1616. ax = plt.gca()
  1617. if (len(conditions) != 0):
  1618. freqs = []
  1619. for condition in conditions:
  1620. if cumulative:
  1621. # freqs should be a list of list where each sub list will be a frequency of a condition
  1622. freqs.append(list(self[condition]._cumulative_frequencies(samples)))
  1623. ylabel = "Cumulative Counts"
  1624. legend_loc = 'lower right'
  1625. if percents:
  1626. freqs[-1] = [f / freqs[len(freqs) - 1] * 100 for f in freqs]
  1627. ylabel = "Cumulative Percents"
  1628. else:
  1629. freqs.append([self[condition][sample] for sample in samples])
  1630. ylabel = "Counts"
  1631. legend_loc = 'upper right'
  1632. # percents = [f * 100 for f in freqs] only in ConditionalProbDist?
  1633. i = 0
  1634. for freq in freqs:
  1635. kwargs['label'] = conditions[i] #label for each condition
  1636. i += 1
  1637. ax.plot(freq, *args, **kwargs)
  1638. ax.legend(loc=legend_loc)
  1639. ax.grid(True, color="silver")
  1640. ax.set_xticks(range(len(samples)))
  1641. ax.set_xticklabels([str(s) for s in samples], rotation=90)
  1642. if title:
  1643. ax.set_title(title)
  1644. ax.set_xlabel("Samples")
  1645. ax.set_ylabel(ylabel)
  1646. plt.show()
  1647. return ax
  1648. def tabulate(self, *args, **kwargs):
  1649. """
  1650. Tabulate the given samples from the conditional frequency distribution.
  1651. :param samples: The samples to plot
  1652. :type samples: list
  1653. :param conditions: The conditions to plot (default is all)
  1654. :type conditions: list
  1655. :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
  1656. :type title: bool
  1657. """
  1658. cumulative = _get_kwarg(kwargs, "cumulative", False)
  1659. conditions = _get_kwarg(kwargs, "conditions", sorted(self.conditions()))
  1660. samples = _get_kwarg(
  1661. kwargs,
  1662. "samples",
  1663. sorted(set(v for c in conditions if c in self for v in self[c])),
  1664. ) # this computation could be wasted
  1665. width = max(len("%s" % s) for s in samples)
  1666. freqs = dict()
  1667. for c in conditions:
  1668. if cumulative:
  1669. freqs[c] = list(self[c]._cumulative_frequencies(samples))
  1670. else:
  1671. freqs[c] = [self[c][sample] for sample in samples]
  1672. width = max(width, max(len("%d" % f) for f in freqs[c]))
  1673. condition_size = max(len("%s" % c) for c in conditions)
  1674. print(" " * condition_size, end=" ")
  1675. for s in samples:
  1676. print("%*s" % (width, s), end=" ")
  1677. print()
  1678. for c in conditions:
  1679. print("%*s" % (condition_size, c), end=" ")
  1680. for f in freqs[c]:
  1681. print("%*d" % (width, f), end=" ")
  1682. print()
  1683. # Mathematical operators
  1684. def __add__(self, other):
  1685. """
  1686. Add counts from two ConditionalFreqDists.
  1687. """
  1688. if not isinstance(other, ConditionalFreqDist):
  1689. return NotImplemented
  1690. result = ConditionalFreqDist()
  1691. for cond in self.conditions():
  1692. newfreqdist = self[cond] + other[cond]
  1693. if newfreqdist:
  1694. result[cond] = newfreqdist
  1695. for cond in other.conditions():
  1696. if cond not in self.conditions():
  1697. for elem, count in other[cond].items():
  1698. if count > 0:
  1699. result[cond][elem] = count
  1700. return result
  1701. def __sub__(self, other):
  1702. """
  1703. Subtract count, but keep only results with positive counts.
  1704. """
  1705. if not isinstance(other, ConditionalFreqDist):
  1706. return NotImplemented
  1707. result = ConditionalFreqDist()
  1708. for cond in self.conditions():
  1709. newfreqdist = self[cond] - other[cond]
  1710. if newfreqdist:
  1711. result[cond] = newfreqdist
  1712. for cond in other.conditions():
  1713. if cond not in self.conditions():
  1714. for elem, count in other[cond].items():
  1715. if count < 0:
  1716. result[cond][elem] = 0 - count
  1717. return result
  1718. def __or__(self, other):
  1719. """
  1720. Union is the maximum of value in either of the input counters.
  1721. """
  1722. if not isinstance(other, ConditionalFreqDist):
  1723. return NotImplemented
  1724. result = ConditionalFreqDist()
  1725. for cond in self.conditions():
  1726. newfreqdist = self[cond] | other[cond]
  1727. if newfreqdist:
  1728. result[cond] = newfreqdist
  1729. for cond in other.conditions():
  1730. if cond not in self.conditions():
  1731. for elem, count in other[cond].items():
  1732. if count > 0:
  1733. result[cond][elem] = count
  1734. return result
  1735. def __and__(self, other):
  1736. """
  1737. Intersection is the minimum of corresponding counts.
  1738. """
  1739. if not isinstance(other, ConditionalFreqDist):
  1740. return NotImplemented
  1741. result = ConditionalFreqDist()
  1742. for cond in self.conditions():
  1743. newfreqdist = self[cond] & other[cond]
  1744. if newfreqdist:
  1745. result[cond] = newfreqdist
  1746. return result
  1747. # @total_ordering doesn't work here, since the class inherits from a builtin class
  1748. def __le__(self, other):
  1749. if not isinstance(other, ConditionalFreqDist):
  1750. raise_unorderable_types("<=", self, other)
  1751. return set(self.conditions()).issubset(other.conditions()) and all(
  1752. self[c] <= other[c] for c in self.conditions()
  1753. )
  1754. def __lt__(self, other):
  1755. if not isinstance(other, ConditionalFreqDist):
  1756. raise_unorderable_types("<", self, other)
  1757. return self <= other and self != other
  1758. def __ge__(self, other):
  1759. if not isinstance(other, ConditionalFreqDist):
  1760. raise_unorderable_types(">=", self, other)
  1761. return other <= self
  1762. def __gt__(self, other):
  1763. if not isinstance(other, ConditionalFreqDist):
  1764. raise_unorderable_types(">", self, other)
  1765. return other < self
  1766. def __repr__(self):
  1767. """
  1768. Return a string representation of this ``ConditionalFreqDist``.
  1769. :rtype: str
  1770. """
  1771. return "<ConditionalFreqDist with %d conditions>" % len(self)
  1772. class ConditionalProbDistI(dict, metaclass=ABCMeta):
  1773. """
  1774. A collection of probability distributions for a single experiment
  1775. run under different conditions. Conditional probability
  1776. distributions are used to estimate the likelihood of each sample,
  1777. given the condition under which the experiment was run. For
  1778. example, a conditional probability distribution could be used to
  1779. estimate the probability of each word type in a document, given
  1780. the length of the word type. Formally, a conditional probability
  1781. distribution can be defined as a function that maps from each
  1782. condition to the ``ProbDist`` for the experiment under that
  1783. condition.
  1784. """
  1785. @abstractmethod
  1786. def __init__(self):
  1787. """
  1788. Classes inheriting from ConditionalProbDistI should implement __init__.
  1789. """
  1790. def conditions(self):
  1791. """
  1792. Return a list of the conditions that are represented by
  1793. this ``ConditionalProbDist``. Use the indexing operator to
  1794. access the probability distribution for a given condition.
  1795. :rtype: list
  1796. """
  1797. return list(self.keys())
  1798. def __repr__(self):
  1799. """
  1800. Return a string representation of this ``ConditionalProbDist``.
  1801. :rtype: str
  1802. """
  1803. return "<%s with %d conditions>" % (type(self).__name__, len(self))
  1804. class ConditionalProbDist(ConditionalProbDistI):
  1805. """
  1806. A conditional probability distribution modeling the experiments
  1807. that were used to generate a conditional frequency distribution.
  1808. A ConditionalProbDist is constructed from a
  1809. ``ConditionalFreqDist`` and a ``ProbDist`` factory:
  1810. - The ``ConditionalFreqDist`` specifies the frequency
  1811. distribution for each condition.
  1812. - The ``ProbDist`` factory is a function that takes a
  1813. condition's frequency distribution, and returns its
  1814. probability distribution. A ``ProbDist`` class's name (such as
  1815. ``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
  1816. that class's constructor.
  1817. The first argument to the ``ProbDist`` factory is the frequency
  1818. distribution that it should model; and the remaining arguments are
  1819. specified by the ``factory_args`` parameter to the
  1820. ``ConditionalProbDist`` constructor. For example, the following
  1821. code constructs a ``ConditionalProbDist``, where the probability
  1822. distribution for each condition is an ``ELEProbDist`` with 10 bins:
  1823. >>> from nltk.corpus import brown
  1824. >>> from nltk.probability import ConditionalFreqDist
  1825. >>> from nltk.probability import ConditionalProbDist, ELEProbDist
  1826. >>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000])
  1827. >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
  1828. >>> cpdist['passed'].max()
  1829. 'VBD'
  1830. >>> cpdist['passed'].prob('VBD')
  1831. 0.423...
  1832. """
  1833. def __init__(self, cfdist, probdist_factory, *factory_args, **factory_kw_args):
  1834. """
  1835. Construct a new conditional probability distribution, based on
  1836. the given conditional frequency distribution and ``ProbDist``
  1837. factory.
  1838. :type cfdist: ConditionalFreqDist
  1839. :param cfdist: The ``ConditionalFreqDist`` specifying the
  1840. frequency distribution for each condition.
  1841. :type probdist_factory: class or function
  1842. :param probdist_factory: The function or class that maps
  1843. a condition's frequency distribution to its probability
  1844. distribution. The function is called with the frequency
  1845. distribution as its first argument,
  1846. ``factory_args`` as its remaining arguments, and
  1847. ``factory_kw_args`` as keyword arguments.
  1848. :type factory_args: (any)
  1849. :param factory_args: Extra arguments for ``probdist_factory``.
  1850. These arguments are usually used to specify extra
  1851. properties for the probability distributions of individual
  1852. conditions, such as the number of bins they contain.
  1853. :type factory_kw_args: (any)
  1854. :param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
  1855. """
  1856. self._probdist_factory = probdist_factory
  1857. self._factory_args = factory_args
  1858. self._factory_kw_args = factory_kw_args
  1859. for condition in cfdist:
  1860. self[condition] = probdist_factory(
  1861. cfdist[condition], *factory_args, **factory_kw_args
  1862. )
  1863. def __missing__(self, key):
  1864. self[key] = self._probdist_factory(
  1865. FreqDist(), *self._factory_args, **self._factory_kw_args
  1866. )
  1867. return self[key]
  1868. class DictionaryConditionalProbDist(ConditionalProbDistI):
  1869. """
  1870. An alternative ConditionalProbDist that simply wraps a dictionary of
  1871. ProbDists rather than creating these from FreqDists.
  1872. """
  1873. def __init__(self, probdist_dict):
  1874. """
  1875. :param probdist_dict: a dictionary containing the probdists indexed
  1876. by the conditions
  1877. :type probdist_dict: dict any -> probdist
  1878. """
  1879. self.update(probdist_dict)
  1880. def __missing__(self, key):
  1881. self[key] = DictionaryProbDist()
  1882. return self[key]
  1883. ##//////////////////////////////////////////////////////
  1884. ## Adding in log-space.
  1885. ##//////////////////////////////////////////////////////
  1886. # If the difference is bigger than this, then just take the bigger one:
  1887. _ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
  1888. def add_logs(logx, logy):
  1889. """
  1890. Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
  1891. *log(x+y)*. Conceptually, this is the same as returning
  1892. ``log(2**(logx)+2**(logy))``, but the actual implementation
  1893. avoids overflow errors that could result from direct computation.
  1894. """
  1895. if logx < logy + _ADD_LOGS_MAX_DIFF:
  1896. return logy
  1897. if logy < logx + _ADD_LOGS_MAX_DIFF:
  1898. return logx
  1899. base = min(logx, logy)
  1900. return base + math.log(2 ** (logx - base) + 2 ** (logy - base), 2)
  1901. def sum_logs(logs):
  1902. return reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF
  1903. ##//////////////////////////////////////////////////////
  1904. ## Probabilistic Mix-in
  1905. ##//////////////////////////////////////////////////////
  1906. class ProbabilisticMixIn(object):
  1907. """
  1908. A mix-in class to associate probabilities with other classes
  1909. (trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
  1910. define a new class that derives from an existing class and from
  1911. ProbabilisticMixIn. You will need to define a new constructor for
  1912. the new class, which explicitly calls the constructors of both its
  1913. parent classes. For example:
  1914. >>> from nltk.probability import ProbabilisticMixIn
  1915. >>> class A:
  1916. ... def __init__(self, x, y): self.data = (x,y)
  1917. ...
  1918. >>> class ProbabilisticA(A, ProbabilisticMixIn):
  1919. ... def __init__(self, x, y, **prob_kwarg):
  1920. ... A.__init__(self, x, y)
  1921. ... ProbabilisticMixIn.__init__(self, **prob_kwarg)
  1922. See the documentation for the ProbabilisticMixIn
  1923. ``constructor<__init__>`` for information about the arguments it
  1924. expects.
  1925. You should generally also redefine the string representation
  1926. methods, the comparison methods, and the hashing method.
  1927. """
  1928. def __init__(self, **kwargs):
  1929. """
  1930. Initialize this object's probability. This initializer should
  1931. be called by subclass constructors. ``prob`` should generally be
  1932. the first argument for those constructors.
  1933. :param prob: The probability associated with the object.
  1934. :type prob: float
  1935. :param logprob: The log of the probability associated with
  1936. the object.
  1937. :type logprob: float
  1938. """
  1939. if "prob" in kwargs:
  1940. if "logprob" in kwargs:
  1941. raise TypeError("Must specify either prob or logprob " "(not both)")
  1942. else:
  1943. ProbabilisticMixIn.set_prob(self, kwargs["prob"])
  1944. elif "logprob" in kwargs:
  1945. ProbabilisticMixIn.set_logprob(self, kwargs["logprob"])
  1946. else:
  1947. self.__prob = self.__logprob = None
  1948. def set_prob(self, prob):
  1949. """
  1950. Set the probability associated with this object to ``prob``.
  1951. :param prob: The new probability
  1952. :type prob: float
  1953. """
  1954. self.__prob = prob
  1955. self.__logprob = None
  1956. def set_logprob(self, logprob):
  1957. """
  1958. Set the log probability associated with this object to
  1959. ``logprob``. I.e., set the probability associated with this
  1960. object to ``2**(logprob)``.
  1961. :param logprob: The new log probability
  1962. :type logprob: float
  1963. """
  1964. self.__logprob = logprob
  1965. self.__prob = None
  1966. def prob(self):
  1967. """
  1968. Return the probability associated with this object.
  1969. :rtype: float
  1970. """
  1971. if self.__prob is None:
  1972. if self.__logprob is None:
  1973. return None
  1974. self.__prob = 2 ** (self.__logprob)
  1975. return self.__prob
  1976. def logprob(self):
  1977. """
  1978. Return ``log(p)``, where ``p`` is the probability associated
  1979. with this object.
  1980. :rtype: float
  1981. """
  1982. if self.__logprob is None:
  1983. if self.__prob is None:
  1984. return None
  1985. self.__logprob = math.log(self.__prob, 2)
  1986. return self.__logprob
  1987. class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
  1988. def set_prob(self, prob):
  1989. raise ValueError("%s is immutable" % self.__class__.__name__)
  1990. def set_logprob(self, prob):
  1991. raise ValueError("%s is immutable" % self.__class__.__name__)
  1992. ## Helper function for processing keyword arguments
  1993. def _get_kwarg(kwargs, key, default):
  1994. if key in kwargs:
  1995. arg = kwargs[key]
  1996. del kwargs[key]
  1997. else:
  1998. arg = default
  1999. return arg
  2000. ##//////////////////////////////////////////////////////
  2001. ## Demonstration
  2002. ##//////////////////////////////////////////////////////
  2003. def _create_rand_fdist(numsamples, numoutcomes):
  2004. """
  2005. Create a new frequency distribution, with random samples. The
  2006. samples are numbers from 1 to ``numsamples``, and are generated by
  2007. summing two numbers, each of which has a uniform distribution.
  2008. """
  2009. fdist = FreqDist()
  2010. for x in range(numoutcomes):
  2011. y = random.randint(1, (1 + numsamples) // 2) + random.randint(
  2012. 0, numsamples // 2
  2013. )
  2014. fdist[y] += 1
  2015. return fdist
  2016. def _create_sum_pdist(numsamples):
  2017. """
  2018. Return the true probability distribution for the experiment
  2019. ``_create_rand_fdist(numsamples, x)``.
  2020. """
  2021. fdist = FreqDist()
  2022. for x in range(1, (1 + numsamples) // 2 + 1):
  2023. for y in range(0, numsamples // 2 + 1):
  2024. fdist[x + y] += 1
  2025. return MLEProbDist(fdist)
  2026. def demo(numsamples=6, numoutcomes=500):
  2027. """
  2028. A demonstration of frequency distributions and probability
  2029. distributions. This demonstration creates three frequency
  2030. distributions with, and uses them to sample a random process with
  2031. ``numsamples`` samples. Each frequency distribution is sampled
  2032. ``numoutcomes`` times. These three frequency distributions are
  2033. then used to build six probability distributions. Finally, the
  2034. probability estimates of these distributions are compared to the
  2035. actual probability of each sample.
  2036. :type numsamples: int
  2037. :param numsamples: The number of samples to use in each demo
  2038. frequency distributions.
  2039. :type numoutcomes: int
  2040. :param numoutcomes: The total number of outcomes for each
  2041. demo frequency distribution. These outcomes are divided into
  2042. ``numsamples`` bins.
  2043. :rtype: None
  2044. """
  2045. # Randomly sample a stochastic process three times.
  2046. fdist1 = _create_rand_fdist(numsamples, numoutcomes)
  2047. fdist2 = _create_rand_fdist(numsamples, numoutcomes)
  2048. fdist3 = _create_rand_fdist(numsamples, numoutcomes)
  2049. # Use our samples to create probability distributions.
  2050. pdists = [
  2051. MLEProbDist(fdist1),
  2052. LidstoneProbDist(fdist1, 0.5, numsamples),
  2053. HeldoutProbDist(fdist1, fdist2, numsamples),
  2054. HeldoutProbDist(fdist2, fdist1, numsamples),
  2055. CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
  2056. SimpleGoodTuringProbDist(fdist1),
  2057. SimpleGoodTuringProbDist(fdist1, 7),
  2058. _create_sum_pdist(numsamples),
  2059. ]
  2060. # Find the probability of each sample.
  2061. vals = []
  2062. for n in range(1, numsamples + 1):
  2063. vals.append(tuple([n, fdist1.freq(n)] + [pdist.prob(n) for pdist in pdists]))
  2064. # Print the results in a formatted table.
  2065. print(
  2066. (
  2067. "%d samples (1-%d); %d outcomes were sampled for each FreqDist"
  2068. % (numsamples, numsamples, numoutcomes)
  2069. )
  2070. )
  2071. print("=" * 9 * (len(pdists) + 2))
  2072. FORMATSTR = " FreqDist " + "%8s " * (len(pdists) - 1) + "| Actual"
  2073. print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
  2074. print("-" * 9 * (len(pdists) + 2))
  2075. FORMATSTR = "%3d %8.6f " + "%8.6f " * (len(pdists) - 1) + "| %8.6f"
  2076. for val in vals:
  2077. print(FORMATSTR % val)
  2078. # Print the totals for each column (should all be 1.0)
  2079. zvals = list(zip(*vals))
  2080. sums = [sum(val) for val in zvals[1:]]
  2081. print("-" * 9 * (len(pdists) + 2))
  2082. FORMATSTR = "Total " + "%8.6f " * (len(pdists)) + "| %8.6f"
  2083. print(FORMATSTR % tuple(sums))
  2084. print("=" * 9 * (len(pdists) + 2))
  2085. # Display the distributions themselves, if they're short enough.
  2086. if len("%s" % fdist1) < 70:
  2087. print(" fdist1: %s" % fdist1)
  2088. print(" fdist2: %s" % fdist2)
  2089. print(" fdist3: %s" % fdist3)
  2090. print()
  2091. print("Generating:")
  2092. for pdist in pdists:
  2093. fdist = FreqDist(pdist.generate() for i in range(5000))
  2094. print("%20s %s" % (pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
  2095. print()
  2096. def gt_demo():
  2097. from nltk import corpus
  2098. emma_words = corpus.gutenberg.words("austen-emma.txt")
  2099. fd = FreqDist(emma_words)
  2100. sgt = SimpleGoodTuringProbDist(fd)
  2101. print("%18s %8s %14s" % ("word", "freqency", "SimpleGoodTuring"))
  2102. fd_keys_sorted = (
  2103. key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True)
  2104. )
  2105. for key in fd_keys_sorted:
  2106. print("%18s %8d %14e" % (key, fd[key], sgt.prob(key)))
  2107. if __name__ == "__main__":
  2108. demo(6, 10)
  2109. demo(5, 5000)
  2110. gt_demo()
  2111. __all__ = [
  2112. "ConditionalFreqDist",
  2113. "ConditionalProbDist",
  2114. "ConditionalProbDistI",
  2115. "CrossValidationProbDist",
  2116. "DictionaryConditionalProbDist",
  2117. "DictionaryProbDist",
  2118. "ELEProbDist",
  2119. "FreqDist",
  2120. "SimpleGoodTuringProbDist",
  2121. "HeldoutProbDist",
  2122. "ImmutableProbabilisticMixIn",
  2123. "LaplaceProbDist",
  2124. "LidstoneProbDist",
  2125. "MLEProbDist",
  2126. "MutableProbDist",
  2127. "KneserNeyProbDist",
  2128. "ProbDistI",
  2129. "ProbabilisticMixIn",
  2130. "UniformProbDist",
  2131. "WittenBellProbDist",
  2132. "add_logs",
  2133. "log_likelihood",
  2134. "sum_logs",
  2135. "entropy",
  2136. ]