_header_value_parser.py 102 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965
  1. """Header value parser implementing various email-related RFC parsing rules.
  2. The parsing methods defined in this module implement various email related
  3. parsing rules. Principal among them is RFC 5322, which is the followon
  4. to RFC 2822 and primarily a clarification of the former. It also implements
  5. RFC 2047 encoded word decoding.
  6. RFC 5322 goes to considerable trouble to maintain backward compatibility with
  7. RFC 822 in the parse phase, while cleaning up the structure on the generation
  8. phase. This parser supports correct RFC 5322 generation by tagging white space
  9. as folding white space only when folding is allowed in the non-obsolete rule
  10. sets. Actually, the parser is even more generous when accepting input than RFC
  11. 5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
  12. Where possible deviations from the standard are annotated on the 'defects'
  13. attribute of tokens that deviate.
  14. The general structure of the parser follows RFC 5322, and uses its terminology
  15. where there is a direct correspondence. Where the implementation requires a
  16. somewhat different structure than that used by the formal grammar, new terms
  17. that mimic the closest existing terms are used. Thus, it really helps to have
  18. a copy of RFC 5322 handy when studying this code.
  19. Input to the parser is a string that has already been unfolded according to
  20. RFC 5322 rules. According to the RFC this unfolding is the very first step, and
  21. this parser leaves the unfolding step to a higher level message parser, which
  22. will have already detected the line breaks that need unfolding while
  23. determining the beginning and end of each header.
  24. The output of the parser is a TokenList object, which is a list subclass. A
  25. TokenList is a recursive data structure. The terminal nodes of the structure
  26. are Terminal objects, which are subclasses of str. These do not correspond
  27. directly to terminal objects in the formal grammar, but are instead more
  28. practical higher level combinations of true terminals.
  29. All TokenList and Terminal objects have a 'value' attribute, which produces the
  30. semantically meaningful value of that part of the parse subtree. The value of
  31. all whitespace tokens (no matter how many sub-tokens they may contain) is a
  32. single space, as per the RFC rules. This includes 'CFWS', which is herein
  33. included in the general class of whitespace tokens. There is one exception to
  34. the rule that whitespace tokens are collapsed into single spaces in values: in
  35. the value of a 'bare-quoted-string' (a quoted-string with no leading or
  36. trailing whitespace), any whitespace that appeared between the quotation marks
  37. is preserved in the returned value. Note that in all Terminal strings quoted
  38. pairs are turned into their unquoted values.
  39. All TokenList and Terminal objects also have a string value, which attempts to
  40. be a "canonical" representation of the RFC-compliant form of the substring that
  41. produced the parsed subtree, including minimal use of quoted pair quoting.
  42. Whitespace runs are not collapsed.
  43. Comment tokens also have a 'content' attribute providing the string found
  44. between the parens (including any nested comments) with whitespace preserved.
  45. All TokenList and Terminal objects have a 'defects' attribute which is a
  46. possibly empty list all of the defects found while creating the token. Defects
  47. may appear on any token in the tree, and a composite list of all defects in the
  48. subtree is available through the 'all_defects' attribute of any node. (For
  49. Terminal notes x.defects == x.all_defects.)
  50. Each object in a parse tree is called a 'token', and each has a 'token_type'
  51. attribute that gives the name from the RFC 5322 grammar that it represents.
  52. Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
  53. may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
  54. It is returned in place of lists of (ctext/quoted-pair) and
  55. (qtext/quoted-pair).
  56. XXX: provide complete list of token types.
  57. """
  58. from __future__ import print_function
  59. from __future__ import unicode_literals
  60. from __future__ import division
  61. from __future__ import absolute_import
  62. from future.builtins import int, range, str, super, list
  63. import re
  64. from collections import namedtuple, OrderedDict
  65. from future.backports.urllib.parse import (unquote, unquote_to_bytes)
  66. from future.backports.email import _encoded_words as _ew
  67. from future.backports.email import errors
  68. from future.backports.email import utils
  69. #
  70. # Useful constants and functions
  71. #
  72. WSP = set(' \t')
  73. CFWS_LEADER = WSP | set('(')
  74. SPECIALS = set(r'()<>@,:;.\"[]')
  75. ATOM_ENDS = SPECIALS | WSP
  76. DOT_ATOM_ENDS = ATOM_ENDS - set('.')
  77. # '.', '"', and '(' do not end phrases in order to support obs-phrase
  78. PHRASE_ENDS = SPECIALS - set('."(')
  79. TSPECIALS = (SPECIALS | set('/?=')) - set('.')
  80. TOKEN_ENDS = TSPECIALS | WSP
  81. ASPECIALS = TSPECIALS | set("*'%")
  82. ATTRIBUTE_ENDS = ASPECIALS | WSP
  83. EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
  84. def quote_string(value):
  85. return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
  86. #
  87. # Accumulator for header folding
  88. #
  89. class _Folded(object):
  90. def __init__(self, maxlen, policy):
  91. self.maxlen = maxlen
  92. self.policy = policy
  93. self.lastlen = 0
  94. self.stickyspace = None
  95. self.firstline = True
  96. self.done = []
  97. self.current = list() # uses l.clear()
  98. def newline(self):
  99. self.done.extend(self.current)
  100. self.done.append(self.policy.linesep)
  101. self.current.clear()
  102. self.lastlen = 0
  103. def finalize(self):
  104. if self.current:
  105. self.newline()
  106. def __str__(self):
  107. return ''.join(self.done)
  108. def append(self, stoken):
  109. self.current.append(stoken)
  110. def append_if_fits(self, token, stoken=None):
  111. if stoken is None:
  112. stoken = str(token)
  113. l = len(stoken)
  114. if self.stickyspace is not None:
  115. stickyspace_len = len(self.stickyspace)
  116. if self.lastlen + stickyspace_len + l <= self.maxlen:
  117. self.current.append(self.stickyspace)
  118. self.lastlen += stickyspace_len
  119. self.current.append(stoken)
  120. self.lastlen += l
  121. self.stickyspace = None
  122. self.firstline = False
  123. return True
  124. if token.has_fws:
  125. ws = token.pop_leading_fws()
  126. if ws is not None:
  127. self.stickyspace += str(ws)
  128. stickyspace_len += len(ws)
  129. token._fold(self)
  130. return True
  131. if stickyspace_len and l + 1 <= self.maxlen:
  132. margin = self.maxlen - l
  133. if 0 < margin < stickyspace_len:
  134. trim = stickyspace_len - margin
  135. self.current.append(self.stickyspace[:trim])
  136. self.stickyspace = self.stickyspace[trim:]
  137. stickyspace_len = trim
  138. self.newline()
  139. self.current.append(self.stickyspace)
  140. self.current.append(stoken)
  141. self.lastlen = l + stickyspace_len
  142. self.stickyspace = None
  143. self.firstline = False
  144. return True
  145. if not self.firstline:
  146. self.newline()
  147. self.current.append(self.stickyspace)
  148. self.current.append(stoken)
  149. self.stickyspace = None
  150. self.firstline = False
  151. return True
  152. if self.lastlen + l <= self.maxlen:
  153. self.current.append(stoken)
  154. self.lastlen += l
  155. return True
  156. if l < self.maxlen:
  157. self.newline()
  158. self.current.append(stoken)
  159. self.lastlen = l
  160. return True
  161. return False
  162. #
  163. # TokenList and its subclasses
  164. #
  165. class TokenList(list):
  166. token_type = None
  167. def __init__(self, *args, **kw):
  168. super(TokenList, self).__init__(*args, **kw)
  169. self.defects = []
  170. def __str__(self):
  171. return ''.join(str(x) for x in self)
  172. def __repr__(self):
  173. return '{}({})'.format(self.__class__.__name__,
  174. super(TokenList, self).__repr__())
  175. @property
  176. def value(self):
  177. return ''.join(x.value for x in self if x.value)
  178. @property
  179. def all_defects(self):
  180. return sum((x.all_defects for x in self), self.defects)
  181. #
  182. # Folding API
  183. #
  184. # parts():
  185. #
  186. # return a list of objects that constitute the "higher level syntactic
  187. # objects" specified by the RFC as the best places to fold a header line.
  188. # The returned objects must include leading folding white space, even if
  189. # this means mutating the underlying parse tree of the object. Each object
  190. # is only responsible for returning *its* parts, and should not drill down
  191. # to any lower level except as required to meet the leading folding white
  192. # space constraint.
  193. #
  194. # _fold(folded):
  195. #
  196. # folded: the result accumulator. This is an instance of _Folded.
  197. # (XXX: I haven't finished factoring this out yet, the folding code
  198. # pretty much uses this as a state object.) When the folded.current
  199. # contains as much text as will fit, the _fold method should call
  200. # folded.newline.
  201. # folded.lastlen: the current length of the test stored in folded.current.
  202. # folded.maxlen: The maximum number of characters that may appear on a
  203. # folded line. Differs from the policy setting in that "no limit" is
  204. # represented by +inf, which means it can be used in the trivially
  205. # logical fashion in comparisons.
  206. #
  207. # Currently no subclasses implement parts, and I think this will remain
  208. # true. A subclass only needs to implement _fold when the generic version
  209. # isn't sufficient. _fold will need to be implemented primarily when it is
  210. # possible for encoded words to appear in the specialized token-list, since
  211. # there is no generic algorithm that can know where exactly the encoded
  212. # words are allowed. A _fold implementation is responsible for filling
  213. # lines in the same general way that the top level _fold does. It may, and
  214. # should, call the _fold method of sub-objects in a similar fashion to that
  215. # of the top level _fold.
  216. #
  217. # XXX: I'm hoping it will be possible to factor the existing code further
  218. # to reduce redundancy and make the logic clearer.
  219. @property
  220. def parts(self):
  221. klass = self.__class__
  222. this = list()
  223. for token in self:
  224. if token.startswith_fws():
  225. if this:
  226. yield this[0] if len(this)==1 else klass(this)
  227. this.clear()
  228. end_ws = token.pop_trailing_ws()
  229. this.append(token)
  230. if end_ws:
  231. yield klass(this)
  232. this = [end_ws]
  233. if this:
  234. yield this[0] if len(this)==1 else klass(this)
  235. def startswith_fws(self):
  236. return self[0].startswith_fws()
  237. def pop_leading_fws(self):
  238. if self[0].token_type == 'fws':
  239. return self.pop(0)
  240. return self[0].pop_leading_fws()
  241. def pop_trailing_ws(self):
  242. if self[-1].token_type == 'cfws':
  243. return self.pop(-1)
  244. return self[-1].pop_trailing_ws()
  245. @property
  246. def has_fws(self):
  247. for part in self:
  248. if part.has_fws:
  249. return True
  250. return False
  251. def has_leading_comment(self):
  252. return self[0].has_leading_comment()
  253. @property
  254. def comments(self):
  255. comments = []
  256. for token in self:
  257. comments.extend(token.comments)
  258. return comments
  259. def fold(self, **_3to2kwargs):
  260. # max_line_length 0/None means no limit, ie: infinitely long.
  261. policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
  262. maxlen = policy.max_line_length or float("+inf")
  263. folded = _Folded(maxlen, policy)
  264. self._fold(folded)
  265. folded.finalize()
  266. return str(folded)
  267. def as_encoded_word(self, charset):
  268. # This works only for things returned by 'parts', which include
  269. # the leading fws, if any, that should be used.
  270. res = []
  271. ws = self.pop_leading_fws()
  272. if ws:
  273. res.append(ws)
  274. trailer = self.pop(-1) if self[-1].token_type=='fws' else ''
  275. res.append(_ew.encode(str(self), charset))
  276. res.append(trailer)
  277. return ''.join(res)
  278. def cte_encode(self, charset, policy):
  279. res = []
  280. for part in self:
  281. res.append(part.cte_encode(charset, policy))
  282. return ''.join(res)
  283. def _fold(self, folded):
  284. for part in self.parts:
  285. tstr = str(part)
  286. tlen = len(tstr)
  287. try:
  288. str(part).encode('us-ascii')
  289. except UnicodeEncodeError:
  290. if any(isinstance(x, errors.UndecodableBytesDefect)
  291. for x in part.all_defects):
  292. charset = 'unknown-8bit'
  293. else:
  294. # XXX: this should be a policy setting
  295. charset = 'utf-8'
  296. tstr = part.cte_encode(charset, folded.policy)
  297. tlen = len(tstr)
  298. if folded.append_if_fits(part, tstr):
  299. continue
  300. # Peel off the leading whitespace if any and make it sticky, to
  301. # avoid infinite recursion.
  302. ws = part.pop_leading_fws()
  303. if ws is not None:
  304. # Peel off the leading whitespace and make it sticky, to
  305. # avoid infinite recursion.
  306. folded.stickyspace = str(part.pop(0))
  307. if folded.append_if_fits(part):
  308. continue
  309. if part.has_fws:
  310. part._fold(folded)
  311. continue
  312. # There are no fold points in this one; it is too long for a single
  313. # line and can't be split...we just have to put it on its own line.
  314. folded.append(tstr)
  315. folded.newline()
  316. def pprint(self, indent=''):
  317. print('\n'.join(self._pp(indent='')))
  318. def ppstr(self, indent=''):
  319. return '\n'.join(self._pp(indent=''))
  320. def _pp(self, indent=''):
  321. yield '{}{}/{}('.format(
  322. indent,
  323. self.__class__.__name__,
  324. self.token_type)
  325. for token in self:
  326. if not hasattr(token, '_pp'):
  327. yield (indent + ' !! invalid element in token '
  328. 'list: {!r}'.format(token))
  329. else:
  330. for line in token._pp(indent+' '):
  331. yield line
  332. if self.defects:
  333. extra = ' Defects: {}'.format(self.defects)
  334. else:
  335. extra = ''
  336. yield '{}){}'.format(indent, extra)
  337. class WhiteSpaceTokenList(TokenList):
  338. @property
  339. def value(self):
  340. return ' '
  341. @property
  342. def comments(self):
  343. return [x.content for x in self if x.token_type=='comment']
  344. class UnstructuredTokenList(TokenList):
  345. token_type = 'unstructured'
  346. def _fold(self, folded):
  347. if any(x.token_type=='encoded-word' for x in self):
  348. return self._fold_encoded(folded)
  349. # Here we can have either a pure ASCII string that may or may not
  350. # have surrogateescape encoded bytes, or a unicode string.
  351. last_ew = None
  352. for part in self.parts:
  353. tstr = str(part)
  354. is_ew = False
  355. try:
  356. str(part).encode('us-ascii')
  357. except UnicodeEncodeError:
  358. if any(isinstance(x, errors.UndecodableBytesDefect)
  359. for x in part.all_defects):
  360. charset = 'unknown-8bit'
  361. else:
  362. charset = 'utf-8'
  363. if last_ew is not None:
  364. # We've already done an EW, combine this one with it
  365. # if there's room.
  366. chunk = get_unstructured(
  367. ''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
  368. oldlastlen = sum(len(x) for x in folded.current[:last_ew])
  369. schunk = str(chunk)
  370. lchunk = len(schunk)
  371. if oldlastlen + lchunk <= folded.maxlen:
  372. del folded.current[last_ew:]
  373. folded.append(schunk)
  374. folded.lastlen = oldlastlen + lchunk
  375. continue
  376. tstr = part.as_encoded_word(charset)
  377. is_ew = True
  378. if folded.append_if_fits(part, tstr):
  379. if is_ew:
  380. last_ew = len(folded.current) - 1
  381. continue
  382. if is_ew or last_ew:
  383. # It's too big to fit on the line, but since we've
  384. # got encoded words we can use encoded word folding.
  385. part._fold_as_ew(folded)
  386. continue
  387. # Peel off the leading whitespace if any and make it sticky, to
  388. # avoid infinite recursion.
  389. ws = part.pop_leading_fws()
  390. if ws is not None:
  391. folded.stickyspace = str(ws)
  392. if folded.append_if_fits(part):
  393. continue
  394. if part.has_fws:
  395. part.fold(folded)
  396. continue
  397. # It can't be split...we just have to put it on its own line.
  398. folded.append(tstr)
  399. folded.newline()
  400. last_ew = None
  401. def cte_encode(self, charset, policy):
  402. res = []
  403. last_ew = None
  404. for part in self:
  405. spart = str(part)
  406. try:
  407. spart.encode('us-ascii')
  408. res.append(spart)
  409. except UnicodeEncodeError:
  410. if last_ew is None:
  411. res.append(part.cte_encode(charset, policy))
  412. last_ew = len(res)
  413. else:
  414. tl = get_unstructured(''.join(res[last_ew:] + [spart]))
  415. res.append(tl.as_encoded_word())
  416. return ''.join(res)
  417. class Phrase(TokenList):
  418. token_type = 'phrase'
  419. def _fold(self, folded):
  420. # As with Unstructured, we can have pure ASCII with or without
  421. # surrogateescape encoded bytes, or we could have unicode. But this
  422. # case is more complicated, since we have to deal with the various
  423. # sub-token types and how they can be composed in the face of
  424. # unicode-that-needs-CTE-encoding, and the fact that if a token a
  425. # comment that becomes a barrier across which we can't compose encoded
  426. # words.
  427. last_ew = None
  428. for part in self.parts:
  429. tstr = str(part)
  430. tlen = len(tstr)
  431. has_ew = False
  432. try:
  433. str(part).encode('us-ascii')
  434. except UnicodeEncodeError:
  435. if any(isinstance(x, errors.UndecodableBytesDefect)
  436. for x in part.all_defects):
  437. charset = 'unknown-8bit'
  438. else:
  439. charset = 'utf-8'
  440. if last_ew is not None and not part.has_leading_comment():
  441. # We've already done an EW, let's see if we can combine
  442. # this one with it. The last_ew logic ensures that all we
  443. # have at this point is atoms, no comments or quoted
  444. # strings. So we can treat the text between the last
  445. # encoded word and the content of this token as
  446. # unstructured text, and things will work correctly. But
  447. # we have to strip off any trailing comment on this token
  448. # first, and if it is a quoted string we have to pull out
  449. # the content (we're encoding it, so it no longer needs to
  450. # be quoted).
  451. if part[-1].token_type == 'cfws' and part.comments:
  452. remainder = part.pop(-1)
  453. else:
  454. remainder = ''
  455. for i, token in enumerate(part):
  456. if token.token_type == 'bare-quoted-string':
  457. part[i] = UnstructuredTokenList(token[:])
  458. chunk = get_unstructured(
  459. ''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
  460. schunk = str(chunk)
  461. lchunk = len(schunk)
  462. if last_ew + lchunk <= folded.maxlen:
  463. del folded.current[last_ew:]
  464. folded.append(schunk)
  465. folded.lastlen = sum(len(x) for x in folded.current)
  466. continue
  467. tstr = part.as_encoded_word(charset)
  468. tlen = len(tstr)
  469. has_ew = True
  470. if folded.append_if_fits(part, tstr):
  471. if has_ew and not part.comments:
  472. last_ew = len(folded.current) - 1
  473. elif part.comments or part.token_type == 'quoted-string':
  474. # If a comment is involved we can't combine EWs. And if a
  475. # quoted string is involved, it's not worth the effort to
  476. # try to combine them.
  477. last_ew = None
  478. continue
  479. part._fold(folded)
  480. def cte_encode(self, charset, policy):
  481. res = []
  482. last_ew = None
  483. is_ew = False
  484. for part in self:
  485. spart = str(part)
  486. try:
  487. spart.encode('us-ascii')
  488. res.append(spart)
  489. except UnicodeEncodeError:
  490. is_ew = True
  491. if last_ew is None:
  492. if not part.comments:
  493. last_ew = len(res)
  494. res.append(part.cte_encode(charset, policy))
  495. elif not part.has_leading_comment():
  496. if part[-1].token_type == 'cfws' and part.comments:
  497. remainder = part.pop(-1)
  498. else:
  499. remainder = ''
  500. for i, token in enumerate(part):
  501. if token.token_type == 'bare-quoted-string':
  502. part[i] = UnstructuredTokenList(token[:])
  503. tl = get_unstructured(''.join(res[last_ew:] + [spart]))
  504. res[last_ew:] = [tl.as_encoded_word(charset)]
  505. if part.comments or (not is_ew and part.token_type == 'quoted-string'):
  506. last_ew = None
  507. return ''.join(res)
  508. class Word(TokenList):
  509. token_type = 'word'
  510. class CFWSList(WhiteSpaceTokenList):
  511. token_type = 'cfws'
  512. def has_leading_comment(self):
  513. return bool(self.comments)
  514. class Atom(TokenList):
  515. token_type = 'atom'
  516. class Token(TokenList):
  517. token_type = 'token'
  518. class EncodedWord(TokenList):
  519. token_type = 'encoded-word'
  520. cte = None
  521. charset = None
  522. lang = None
  523. @property
  524. def encoded(self):
  525. if self.cte is not None:
  526. return self.cte
  527. _ew.encode(str(self), self.charset)
  528. class QuotedString(TokenList):
  529. token_type = 'quoted-string'
  530. @property
  531. def content(self):
  532. for x in self:
  533. if x.token_type == 'bare-quoted-string':
  534. return x.value
  535. @property
  536. def quoted_value(self):
  537. res = []
  538. for x in self:
  539. if x.token_type == 'bare-quoted-string':
  540. res.append(str(x))
  541. else:
  542. res.append(x.value)
  543. return ''.join(res)
  544. @property
  545. def stripped_value(self):
  546. for token in self:
  547. if token.token_type == 'bare-quoted-string':
  548. return token.value
  549. class BareQuotedString(QuotedString):
  550. token_type = 'bare-quoted-string'
  551. def __str__(self):
  552. return quote_string(''.join(str(x) for x in self))
  553. @property
  554. def value(self):
  555. return ''.join(str(x) for x in self)
  556. class Comment(WhiteSpaceTokenList):
  557. token_type = 'comment'
  558. def __str__(self):
  559. return ''.join(sum([
  560. ["("],
  561. [self.quote(x) for x in self],
  562. [")"],
  563. ], []))
  564. def quote(self, value):
  565. if value.token_type == 'comment':
  566. return str(value)
  567. return str(value).replace('\\', '\\\\').replace(
  568. '(', '\(').replace(
  569. ')', '\)')
  570. @property
  571. def content(self):
  572. return ''.join(str(x) for x in self)
  573. @property
  574. def comments(self):
  575. return [self.content]
  576. class AddressList(TokenList):
  577. token_type = 'address-list'
  578. @property
  579. def addresses(self):
  580. return [x for x in self if x.token_type=='address']
  581. @property
  582. def mailboxes(self):
  583. return sum((x.mailboxes
  584. for x in self if x.token_type=='address'), [])
  585. @property
  586. def all_mailboxes(self):
  587. return sum((x.all_mailboxes
  588. for x in self if x.token_type=='address'), [])
  589. class Address(TokenList):
  590. token_type = 'address'
  591. @property
  592. def display_name(self):
  593. if self[0].token_type == 'group':
  594. return self[0].display_name
  595. @property
  596. def mailboxes(self):
  597. if self[0].token_type == 'mailbox':
  598. return [self[0]]
  599. elif self[0].token_type == 'invalid-mailbox':
  600. return []
  601. return self[0].mailboxes
  602. @property
  603. def all_mailboxes(self):
  604. if self[0].token_type == 'mailbox':
  605. return [self[0]]
  606. elif self[0].token_type == 'invalid-mailbox':
  607. return [self[0]]
  608. return self[0].all_mailboxes
  609. class MailboxList(TokenList):
  610. token_type = 'mailbox-list'
  611. @property
  612. def mailboxes(self):
  613. return [x for x in self if x.token_type=='mailbox']
  614. @property
  615. def all_mailboxes(self):
  616. return [x for x in self
  617. if x.token_type in ('mailbox', 'invalid-mailbox')]
  618. class GroupList(TokenList):
  619. token_type = 'group-list'
  620. @property
  621. def mailboxes(self):
  622. if not self or self[0].token_type != 'mailbox-list':
  623. return []
  624. return self[0].mailboxes
  625. @property
  626. def all_mailboxes(self):
  627. if not self or self[0].token_type != 'mailbox-list':
  628. return []
  629. return self[0].all_mailboxes
  630. class Group(TokenList):
  631. token_type = "group"
  632. @property
  633. def mailboxes(self):
  634. if self[2].token_type != 'group-list':
  635. return []
  636. return self[2].mailboxes
  637. @property
  638. def all_mailboxes(self):
  639. if self[2].token_type != 'group-list':
  640. return []
  641. return self[2].all_mailboxes
  642. @property
  643. def display_name(self):
  644. return self[0].display_name
  645. class NameAddr(TokenList):
  646. token_type = 'name-addr'
  647. @property
  648. def display_name(self):
  649. if len(self) == 1:
  650. return None
  651. return self[0].display_name
  652. @property
  653. def local_part(self):
  654. return self[-1].local_part
  655. @property
  656. def domain(self):
  657. return self[-1].domain
  658. @property
  659. def route(self):
  660. return self[-1].route
  661. @property
  662. def addr_spec(self):
  663. return self[-1].addr_spec
  664. class AngleAddr(TokenList):
  665. token_type = 'angle-addr'
  666. @property
  667. def local_part(self):
  668. for x in self:
  669. if x.token_type == 'addr-spec':
  670. return x.local_part
  671. @property
  672. def domain(self):
  673. for x in self:
  674. if x.token_type == 'addr-spec':
  675. return x.domain
  676. @property
  677. def route(self):
  678. for x in self:
  679. if x.token_type == 'obs-route':
  680. return x.domains
  681. @property
  682. def addr_spec(self):
  683. for x in self:
  684. if x.token_type == 'addr-spec':
  685. return x.addr_spec
  686. else:
  687. return '<>'
  688. class ObsRoute(TokenList):
  689. token_type = 'obs-route'
  690. @property
  691. def domains(self):
  692. return [x.domain for x in self if x.token_type == 'domain']
  693. class Mailbox(TokenList):
  694. token_type = 'mailbox'
  695. @property
  696. def display_name(self):
  697. if self[0].token_type == 'name-addr':
  698. return self[0].display_name
  699. @property
  700. def local_part(self):
  701. return self[0].local_part
  702. @property
  703. def domain(self):
  704. return self[0].domain
  705. @property
  706. def route(self):
  707. if self[0].token_type == 'name-addr':
  708. return self[0].route
  709. @property
  710. def addr_spec(self):
  711. return self[0].addr_spec
  712. class InvalidMailbox(TokenList):
  713. token_type = 'invalid-mailbox'
  714. @property
  715. def display_name(self):
  716. return None
  717. local_part = domain = route = addr_spec = display_name
  718. class Domain(TokenList):
  719. token_type = 'domain'
  720. @property
  721. def domain(self):
  722. return ''.join(super(Domain, self).value.split())
  723. class DotAtom(TokenList):
  724. token_type = 'dot-atom'
  725. class DotAtomText(TokenList):
  726. token_type = 'dot-atom-text'
  727. class AddrSpec(TokenList):
  728. token_type = 'addr-spec'
  729. @property
  730. def local_part(self):
  731. return self[0].local_part
  732. @property
  733. def domain(self):
  734. if len(self) < 3:
  735. return None
  736. return self[-1].domain
  737. @property
  738. def value(self):
  739. if len(self) < 3:
  740. return self[0].value
  741. return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
  742. @property
  743. def addr_spec(self):
  744. nameset = set(self.local_part)
  745. if len(nameset) > len(nameset-DOT_ATOM_ENDS):
  746. lp = quote_string(self.local_part)
  747. else:
  748. lp = self.local_part
  749. if self.domain is not None:
  750. return lp + '@' + self.domain
  751. return lp
  752. class ObsLocalPart(TokenList):
  753. token_type = 'obs-local-part'
  754. class DisplayName(Phrase):
  755. token_type = 'display-name'
  756. @property
  757. def display_name(self):
  758. res = TokenList(self)
  759. if res[0].token_type == 'cfws':
  760. res.pop(0)
  761. else:
  762. if res[0][0].token_type == 'cfws':
  763. res[0] = TokenList(res[0][1:])
  764. if res[-1].token_type == 'cfws':
  765. res.pop()
  766. else:
  767. if res[-1][-1].token_type == 'cfws':
  768. res[-1] = TokenList(res[-1][:-1])
  769. return res.value
  770. @property
  771. def value(self):
  772. quote = False
  773. if self.defects:
  774. quote = True
  775. else:
  776. for x in self:
  777. if x.token_type == 'quoted-string':
  778. quote = True
  779. if quote:
  780. pre = post = ''
  781. if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
  782. pre = ' '
  783. if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
  784. post = ' '
  785. return pre+quote_string(self.display_name)+post
  786. else:
  787. return super(DisplayName, self).value
  788. class LocalPart(TokenList):
  789. token_type = 'local-part'
  790. @property
  791. def value(self):
  792. if self[0].token_type == "quoted-string":
  793. return self[0].quoted_value
  794. else:
  795. return self[0].value
  796. @property
  797. def local_part(self):
  798. # Strip whitespace from front, back, and around dots.
  799. res = [DOT]
  800. last = DOT
  801. last_is_tl = False
  802. for tok in self[0] + [DOT]:
  803. if tok.token_type == 'cfws':
  804. continue
  805. if (last_is_tl and tok.token_type == 'dot' and
  806. last[-1].token_type == 'cfws'):
  807. res[-1] = TokenList(last[:-1])
  808. is_tl = isinstance(tok, TokenList)
  809. if (is_tl and last.token_type == 'dot' and
  810. tok[0].token_type == 'cfws'):
  811. res.append(TokenList(tok[1:]))
  812. else:
  813. res.append(tok)
  814. last = res[-1]
  815. last_is_tl = is_tl
  816. res = TokenList(res[1:-1])
  817. return res.value
  818. class DomainLiteral(TokenList):
  819. token_type = 'domain-literal'
  820. @property
  821. def domain(self):
  822. return ''.join(super(DomainLiteral, self).value.split())
  823. @property
  824. def ip(self):
  825. for x in self:
  826. if x.token_type == 'ptext':
  827. return x.value
  828. class MIMEVersion(TokenList):
  829. token_type = 'mime-version'
  830. major = None
  831. minor = None
  832. class Parameter(TokenList):
  833. token_type = 'parameter'
  834. sectioned = False
  835. extended = False
  836. charset = 'us-ascii'
  837. @property
  838. def section_number(self):
  839. # Because the first token, the attribute (name) eats CFWS, the second
  840. # token is always the section if there is one.
  841. return self[1].number if self.sectioned else 0
  842. @property
  843. def param_value(self):
  844. # This is part of the "handle quoted extended parameters" hack.
  845. for token in self:
  846. if token.token_type == 'value':
  847. return token.stripped_value
  848. if token.token_type == 'quoted-string':
  849. for token in token:
  850. if token.token_type == 'bare-quoted-string':
  851. for token in token:
  852. if token.token_type == 'value':
  853. return token.stripped_value
  854. return ''
  855. class InvalidParameter(Parameter):
  856. token_type = 'invalid-parameter'
  857. class Attribute(TokenList):
  858. token_type = 'attribute'
  859. @property
  860. def stripped_value(self):
  861. for token in self:
  862. if token.token_type.endswith('attrtext'):
  863. return token.value
  864. class Section(TokenList):
  865. token_type = 'section'
  866. number = None
  867. class Value(TokenList):
  868. token_type = 'value'
  869. @property
  870. def stripped_value(self):
  871. token = self[0]
  872. if token.token_type == 'cfws':
  873. token = self[1]
  874. if token.token_type.endswith(
  875. ('quoted-string', 'attribute', 'extended-attribute')):
  876. return token.stripped_value
  877. return self.value
  878. class MimeParameters(TokenList):
  879. token_type = 'mime-parameters'
  880. @property
  881. def params(self):
  882. # The RFC specifically states that the ordering of parameters is not
  883. # guaranteed and may be reordered by the transport layer. So we have
  884. # to assume the RFC 2231 pieces can come in any order. However, we
  885. # output them in the order that we first see a given name, which gives
  886. # us a stable __str__.
  887. params = OrderedDict()
  888. for token in self:
  889. if not token.token_type.endswith('parameter'):
  890. continue
  891. if token[0].token_type != 'attribute':
  892. continue
  893. name = token[0].value.strip()
  894. if name not in params:
  895. params[name] = []
  896. params[name].append((token.section_number, token))
  897. for name, parts in params.items():
  898. parts = sorted(parts)
  899. # XXX: there might be more recovery we could do here if, for
  900. # example, this is really a case of a duplicate attribute name.
  901. value_parts = []
  902. charset = parts[0][1].charset
  903. for i, (section_number, param) in enumerate(parts):
  904. if section_number != i:
  905. param.defects.append(errors.InvalidHeaderDefect(
  906. "inconsistent multipart parameter numbering"))
  907. value = param.param_value
  908. if param.extended:
  909. try:
  910. value = unquote_to_bytes(value)
  911. except UnicodeEncodeError:
  912. # source had surrogate escaped bytes. What we do now
  913. # is a bit of an open question. I'm not sure this is
  914. # the best choice, but it is what the old algorithm did
  915. value = unquote(value, encoding='latin-1')
  916. else:
  917. try:
  918. value = value.decode(charset, 'surrogateescape')
  919. except LookupError:
  920. # XXX: there should really be a custom defect for
  921. # unknown character set to make it easy to find,
  922. # because otherwise unknown charset is a silent
  923. # failure.
  924. value = value.decode('us-ascii', 'surrogateescape')
  925. if utils._has_surrogates(value):
  926. param.defects.append(errors.UndecodableBytesDefect())
  927. value_parts.append(value)
  928. value = ''.join(value_parts)
  929. yield name, value
  930. def __str__(self):
  931. params = []
  932. for name, value in self.params:
  933. if value:
  934. params.append('{}={}'.format(name, quote_string(value)))
  935. else:
  936. params.append(name)
  937. params = '; '.join(params)
  938. return ' ' + params if params else ''
  939. class ParameterizedHeaderValue(TokenList):
  940. @property
  941. def params(self):
  942. for token in reversed(self):
  943. if token.token_type == 'mime-parameters':
  944. return token.params
  945. return {}
  946. @property
  947. def parts(self):
  948. if self and self[-1].token_type == 'mime-parameters':
  949. # We don't want to start a new line if all of the params don't fit
  950. # after the value, so unwrap the parameter list.
  951. return TokenList(self[:-1] + self[-1])
  952. return TokenList(self).parts
  953. class ContentType(ParameterizedHeaderValue):
  954. token_type = 'content-type'
  955. maintype = 'text'
  956. subtype = 'plain'
  957. class ContentDisposition(ParameterizedHeaderValue):
  958. token_type = 'content-disposition'
  959. content_disposition = None
  960. class ContentTransferEncoding(TokenList):
  961. token_type = 'content-transfer-encoding'
  962. cte = '7bit'
  963. class HeaderLabel(TokenList):
  964. token_type = 'header-label'
  965. class Header(TokenList):
  966. token_type = 'header'
  967. def _fold(self, folded):
  968. folded.append(str(self.pop(0)))
  969. folded.lastlen = len(folded.current[0])
  970. # The first line of the header is different from all others: we don't
  971. # want to start a new object on a new line if it has any fold points in
  972. # it that would allow part of it to be on the first header line.
  973. # Further, if the first fold point would fit on the new line, we want
  974. # to do that, but if it doesn't we want to put it on the first line.
  975. # Folded supports this via the stickyspace attribute. If this
  976. # attribute is not None, it does the special handling.
  977. folded.stickyspace = str(self.pop(0)) if self[0].token_type == 'cfws' else ''
  978. rest = self.pop(0)
  979. if self:
  980. raise ValueError("Malformed Header token list")
  981. rest._fold(folded)
  982. #
  983. # Terminal classes and instances
  984. #
  985. class Terminal(str):
  986. def __new__(cls, value, token_type):
  987. self = super(Terminal, cls).__new__(cls, value)
  988. self.token_type = token_type
  989. self.defects = []
  990. return self
  991. def __repr__(self):
  992. return "{}({})".format(self.__class__.__name__, super(Terminal, self).__repr__())
  993. @property
  994. def all_defects(self):
  995. return list(self.defects)
  996. def _pp(self, indent=''):
  997. return ["{}{}/{}({}){}".format(
  998. indent,
  999. self.__class__.__name__,
  1000. self.token_type,
  1001. super(Terminal, self).__repr__(),
  1002. '' if not self.defects else ' {}'.format(self.defects),
  1003. )]
  1004. def cte_encode(self, charset, policy):
  1005. value = str(self)
  1006. try:
  1007. value.encode('us-ascii')
  1008. return value
  1009. except UnicodeEncodeError:
  1010. return _ew.encode(value, charset)
  1011. def pop_trailing_ws(self):
  1012. # This terminates the recursion.
  1013. return None
  1014. def pop_leading_fws(self):
  1015. # This terminates the recursion.
  1016. return None
  1017. @property
  1018. def comments(self):
  1019. return []
  1020. def has_leading_comment(self):
  1021. return False
  1022. def __getnewargs__(self):
  1023. return(str(self), self.token_type)
  1024. class WhiteSpaceTerminal(Terminal):
  1025. @property
  1026. def value(self):
  1027. return ' '
  1028. def startswith_fws(self):
  1029. return True
  1030. has_fws = True
  1031. class ValueTerminal(Terminal):
  1032. @property
  1033. def value(self):
  1034. return self
  1035. def startswith_fws(self):
  1036. return False
  1037. has_fws = False
  1038. def as_encoded_word(self, charset):
  1039. return _ew.encode(str(self), charset)
  1040. class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
  1041. @property
  1042. def value(self):
  1043. return ''
  1044. @property
  1045. def encoded(self):
  1046. return self[:]
  1047. def __str__(self):
  1048. return ''
  1049. has_fws = True
  1050. # XXX these need to become classes and used as instances so
  1051. # that a program can't change them in a parse tree and screw
  1052. # up other parse trees. Maybe should have tests for that, too.
  1053. DOT = ValueTerminal('.', 'dot')
  1054. ListSeparator = ValueTerminal(',', 'list-separator')
  1055. RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
  1056. #
  1057. # Parser
  1058. #
  1059. """Parse strings according to RFC822/2047/2822/5322 rules.
  1060. This is a stateless parser. Each get_XXX function accepts a string and
  1061. returns either a Terminal or a TokenList representing the RFC object named
  1062. by the method and a string containing the remaining unparsed characters
  1063. from the input. Thus a parser method consumes the next syntactic construct
  1064. of a given type and returns a token representing the construct plus the
  1065. unparsed remainder of the input string.
  1066. For example, if the first element of a structured header is a 'phrase',
  1067. then:
  1068. phrase, value = get_phrase(value)
  1069. returns the complete phrase from the start of the string value, plus any
  1070. characters left in the string after the phrase is removed.
  1071. """
  1072. _wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
  1073. _non_atom_end_matcher = re.compile(r"[^{}]+".format(
  1074. ''.join(ATOM_ENDS).replace('\\','\\\\').replace(']','\]'))).match
  1075. _non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
  1076. _non_token_end_matcher = re.compile(r"[^{}]+".format(
  1077. ''.join(TOKEN_ENDS).replace('\\','\\\\').replace(']','\]'))).match
  1078. _non_attribute_end_matcher = re.compile(r"[^{}]+".format(
  1079. ''.join(ATTRIBUTE_ENDS).replace('\\','\\\\').replace(']','\]'))).match
  1080. _non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
  1081. ''.join(EXTENDED_ATTRIBUTE_ENDS).replace(
  1082. '\\','\\\\').replace(']','\]'))).match
  1083. def _validate_xtext(xtext):
  1084. """If input token contains ASCII non-printables, register a defect."""
  1085. non_printables = _non_printable_finder(xtext)
  1086. if non_printables:
  1087. xtext.defects.append(errors.NonPrintableDefect(non_printables))
  1088. if utils._has_surrogates(xtext):
  1089. xtext.defects.append(errors.UndecodableBytesDefect(
  1090. "Non-ASCII characters found in header token"))
  1091. def _get_ptext_to_endchars(value, endchars):
  1092. """Scan printables/quoted-pairs until endchars and return unquoted ptext.
  1093. This function turns a run of qcontent, ccontent-without-comments, or
  1094. dtext-with-quoted-printables into a single string by unquoting any
  1095. quoted printables. It returns the string, the remaining value, and
  1096. a flag that is True iff there were any quoted printables decoded.
  1097. """
  1098. _3to2list = list(_wsp_splitter(value, 1))
  1099. fragment, remainder, = _3to2list[:1] + [_3to2list[1:]]
  1100. vchars = []
  1101. escape = False
  1102. had_qp = False
  1103. for pos in range(len(fragment)):
  1104. if fragment[pos] == '\\':
  1105. if escape:
  1106. escape = False
  1107. had_qp = True
  1108. else:
  1109. escape = True
  1110. continue
  1111. if escape:
  1112. escape = False
  1113. elif fragment[pos] in endchars:
  1114. break
  1115. vchars.append(fragment[pos])
  1116. else:
  1117. pos = pos + 1
  1118. return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
  1119. def _decode_ew_run(value):
  1120. """ Decode a run of RFC2047 encoded words.
  1121. _decode_ew_run(value) -> (text, value, defects)
  1122. Scans the supplied value for a run of tokens that look like they are RFC
  1123. 2047 encoded words, decodes those words into text according to RFC 2047
  1124. rules (whitespace between encoded words is discarded), and returns the text
  1125. and the remaining value (including any leading whitespace on the remaining
  1126. value), as well as a list of any defects encountered while decoding. The
  1127. input value may not have any leading whitespace.
  1128. """
  1129. res = []
  1130. defects = []
  1131. last_ws = ''
  1132. while value:
  1133. try:
  1134. tok, ws, value = _wsp_splitter(value, 1)
  1135. except ValueError:
  1136. tok, ws, value = value, '', ''
  1137. if not (tok.startswith('=?') and tok.endswith('?=')):
  1138. return ''.join(res), last_ws + tok + ws + value, defects
  1139. text, charset, lang, new_defects = _ew.decode(tok)
  1140. res.append(text)
  1141. defects.extend(new_defects)
  1142. last_ws = ws
  1143. return ''.join(res), last_ws, defects
  1144. def get_fws(value):
  1145. """FWS = 1*WSP
  1146. This isn't the RFC definition. We're using fws to represent tokens where
  1147. folding can be done, but when we are parsing the *un*folding has already
  1148. been done so we don't need to watch out for CRLF.
  1149. """
  1150. newvalue = value.lstrip()
  1151. fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
  1152. return fws, newvalue
  1153. def get_encoded_word(value):
  1154. """ encoded-word = "=?" charset "?" encoding "?" encoded-text "?="
  1155. """
  1156. ew = EncodedWord()
  1157. if not value.startswith('=?'):
  1158. raise errors.HeaderParseError(
  1159. "expected encoded word but found {}".format(value))
  1160. _3to2list1 = list(value[2:].split('?=', 1))
  1161. tok, remainder, = _3to2list1[:1] + [_3to2list1[1:]]
  1162. if tok == value[2:]:
  1163. raise errors.HeaderParseError(
  1164. "expected encoded word but found {}".format(value))
  1165. remstr = ''.join(remainder)
  1166. if remstr[:2].isdigit():
  1167. _3to2list3 = list(remstr.split('?=', 1))
  1168. rest, remainder, = _3to2list3[:1] + [_3to2list3[1:]]
  1169. tok = tok + '?=' + rest
  1170. if len(tok.split()) > 1:
  1171. ew.defects.append(errors.InvalidHeaderDefect(
  1172. "whitespace inside encoded word"))
  1173. ew.cte = value
  1174. value = ''.join(remainder)
  1175. try:
  1176. text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
  1177. except ValueError:
  1178. raise errors.HeaderParseError(
  1179. "encoded word format invalid: '{}'".format(ew.cte))
  1180. ew.charset = charset
  1181. ew.lang = lang
  1182. ew.defects.extend(defects)
  1183. while text:
  1184. if text[0] in WSP:
  1185. token, text = get_fws(text)
  1186. ew.append(token)
  1187. continue
  1188. _3to2list5 = list(_wsp_splitter(text, 1))
  1189. chars, remainder, = _3to2list5[:1] + [_3to2list5[1:]]
  1190. vtext = ValueTerminal(chars, 'vtext')
  1191. _validate_xtext(vtext)
  1192. ew.append(vtext)
  1193. text = ''.join(remainder)
  1194. return ew, value
  1195. def get_unstructured(value):
  1196. """unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
  1197. obs-unstruct = *((*LF *CR *(obs-utext) *LF *CR)) / FWS)
  1198. obs-utext = %d0 / obs-NO-WS-CTL / LF / CR
  1199. obs-NO-WS-CTL is control characters except WSP/CR/LF.
  1200. So, basically, we have printable runs, plus control characters or nulls in
  1201. the obsolete syntax, separated by whitespace. Since RFC 2047 uses the
  1202. obsolete syntax in its specification, but requires whitespace on either
  1203. side of the encoded words, I can see no reason to need to separate the
  1204. non-printable-non-whitespace from the printable runs if they occur, so we
  1205. parse this into xtext tokens separated by WSP tokens.
  1206. Because an 'unstructured' value must by definition constitute the entire
  1207. value, this 'get' routine does not return a remaining value, only the
  1208. parsed TokenList.
  1209. """
  1210. # XXX: but what about bare CR and LF? They might signal the start or
  1211. # end of an encoded word. YAGNI for now, since out current parsers
  1212. # will never send us strings with bard CR or LF.
  1213. unstructured = UnstructuredTokenList()
  1214. while value:
  1215. if value[0] in WSP:
  1216. token, value = get_fws(value)
  1217. unstructured.append(token)
  1218. continue
  1219. if value.startswith('=?'):
  1220. try:
  1221. token, value = get_encoded_word(value)
  1222. except errors.HeaderParseError:
  1223. pass
  1224. else:
  1225. have_ws = True
  1226. if len(unstructured) > 0:
  1227. if unstructured[-1].token_type != 'fws':
  1228. unstructured.defects.append(errors.InvalidHeaderDefect(
  1229. "missing whitespace before encoded word"))
  1230. have_ws = False
  1231. if have_ws and len(unstructured) > 1:
  1232. if unstructured[-2].token_type == 'encoded-word':
  1233. unstructured[-1] = EWWhiteSpaceTerminal(
  1234. unstructured[-1], 'fws')
  1235. unstructured.append(token)
  1236. continue
  1237. _3to2list7 = list(_wsp_splitter(value, 1))
  1238. tok, remainder, = _3to2list7[:1] + [_3to2list7[1:]]
  1239. vtext = ValueTerminal(tok, 'vtext')
  1240. _validate_xtext(vtext)
  1241. unstructured.append(vtext)
  1242. value = ''.join(remainder)
  1243. return unstructured
  1244. def get_qp_ctext(value):
  1245. """ctext = <printable ascii except \ ( )>
  1246. This is not the RFC ctext, since we are handling nested comments in comment
  1247. and unquoting quoted-pairs here. We allow anything except the '()'
  1248. characters, but if we find any ASCII other than the RFC defined printable
  1249. ASCII an NonPrintableDefect is added to the token's defects list. Since
  1250. quoted pairs are converted to their unquoted values, what is returned is
  1251. a 'ptext' token. In this case it is a WhiteSpaceTerminal, so it's value
  1252. is ' '.
  1253. """
  1254. ptext, value, _ = _get_ptext_to_endchars(value, '()')
  1255. ptext = WhiteSpaceTerminal(ptext, 'ptext')
  1256. _validate_xtext(ptext)
  1257. return ptext, value
  1258. def get_qcontent(value):
  1259. """qcontent = qtext / quoted-pair
  1260. We allow anything except the DQUOTE character, but if we find any ASCII
  1261. other than the RFC defined printable ASCII an NonPrintableDefect is
  1262. added to the token's defects list. Any quoted pairs are converted to their
  1263. unquoted values, so what is returned is a 'ptext' token. In this case it
  1264. is a ValueTerminal.
  1265. """
  1266. ptext, value, _ = _get_ptext_to_endchars(value, '"')
  1267. ptext = ValueTerminal(ptext, 'ptext')
  1268. _validate_xtext(ptext)
  1269. return ptext, value
  1270. def get_atext(value):
  1271. """atext = <matches _atext_matcher>
  1272. We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
  1273. the token's defects list if we find non-atext characters.
  1274. """
  1275. m = _non_atom_end_matcher(value)
  1276. if not m:
  1277. raise errors.HeaderParseError(
  1278. "expected atext but found '{}'".format(value))
  1279. atext = m.group()
  1280. value = value[len(atext):]
  1281. atext = ValueTerminal(atext, 'atext')
  1282. _validate_xtext(atext)
  1283. return atext, value
  1284. def get_bare_quoted_string(value):
  1285. """bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
  1286. A quoted-string without the leading or trailing white space. Its
  1287. value is the text between the quote marks, with whitespace
  1288. preserved and quoted pairs decoded.
  1289. """
  1290. if value[0] != '"':
  1291. raise errors.HeaderParseError(
  1292. "expected '\"' but found '{}'".format(value))
  1293. bare_quoted_string = BareQuotedString()
  1294. value = value[1:]
  1295. while value and value[0] != '"':
  1296. if value[0] in WSP:
  1297. token, value = get_fws(value)
  1298. else:
  1299. token, value = get_qcontent(value)
  1300. bare_quoted_string.append(token)
  1301. if not value:
  1302. bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
  1303. "end of header inside quoted string"))
  1304. return bare_quoted_string, value
  1305. return bare_quoted_string, value[1:]
  1306. def get_comment(value):
  1307. """comment = "(" *([FWS] ccontent) [FWS] ")"
  1308. ccontent = ctext / quoted-pair / comment
  1309. We handle nested comments here, and quoted-pair in our qp-ctext routine.
  1310. """
  1311. if value and value[0] != '(':
  1312. raise errors.HeaderParseError(
  1313. "expected '(' but found '{}'".format(value))
  1314. comment = Comment()
  1315. value = value[1:]
  1316. while value and value[0] != ")":
  1317. if value[0] in WSP:
  1318. token, value = get_fws(value)
  1319. elif value[0] == '(':
  1320. token, value = get_comment(value)
  1321. else:
  1322. token, value = get_qp_ctext(value)
  1323. comment.append(token)
  1324. if not value:
  1325. comment.defects.append(errors.InvalidHeaderDefect(
  1326. "end of header inside comment"))
  1327. return comment, value
  1328. return comment, value[1:]
  1329. def get_cfws(value):
  1330. """CFWS = (1*([FWS] comment) [FWS]) / FWS
  1331. """
  1332. cfws = CFWSList()
  1333. while value and value[0] in CFWS_LEADER:
  1334. if value[0] in WSP:
  1335. token, value = get_fws(value)
  1336. else:
  1337. token, value = get_comment(value)
  1338. cfws.append(token)
  1339. return cfws, value
  1340. def get_quoted_string(value):
  1341. """quoted-string = [CFWS] <bare-quoted-string> [CFWS]
  1342. 'bare-quoted-string' is an intermediate class defined by this
  1343. parser and not by the RFC grammar. It is the quoted string
  1344. without any attached CFWS.
  1345. """
  1346. quoted_string = QuotedString()
  1347. if value and value[0] in CFWS_LEADER:
  1348. token, value = get_cfws(value)
  1349. quoted_string.append(token)
  1350. token, value = get_bare_quoted_string(value)
  1351. quoted_string.append(token)
  1352. if value and value[0] in CFWS_LEADER:
  1353. token, value = get_cfws(value)
  1354. quoted_string.append(token)
  1355. return quoted_string, value
  1356. def get_atom(value):
  1357. """atom = [CFWS] 1*atext [CFWS]
  1358. """
  1359. atom = Atom()
  1360. if value and value[0] in CFWS_LEADER:
  1361. token, value = get_cfws(value)
  1362. atom.append(token)
  1363. if value and value[0] in ATOM_ENDS:
  1364. raise errors.HeaderParseError(
  1365. "expected atom but found '{}'".format(value))
  1366. token, value = get_atext(value)
  1367. atom.append(token)
  1368. if value and value[0] in CFWS_LEADER:
  1369. token, value = get_cfws(value)
  1370. atom.append(token)
  1371. return atom, value
  1372. def get_dot_atom_text(value):
  1373. """ dot-text = 1*atext *("." 1*atext)
  1374. """
  1375. dot_atom_text = DotAtomText()
  1376. if not value or value[0] in ATOM_ENDS:
  1377. raise errors.HeaderParseError("expected atom at a start of "
  1378. "dot-atom-text but found '{}'".format(value))
  1379. while value and value[0] not in ATOM_ENDS:
  1380. token, value = get_atext(value)
  1381. dot_atom_text.append(token)
  1382. if value and value[0] == '.':
  1383. dot_atom_text.append(DOT)
  1384. value = value[1:]
  1385. if dot_atom_text[-1] is DOT:
  1386. raise errors.HeaderParseError("expected atom at end of dot-atom-text "
  1387. "but found '{}'".format('.'+value))
  1388. return dot_atom_text, value
  1389. def get_dot_atom(value):
  1390. """ dot-atom = [CFWS] dot-atom-text [CFWS]
  1391. """
  1392. dot_atom = DotAtom()
  1393. if value[0] in CFWS_LEADER:
  1394. token, value = get_cfws(value)
  1395. dot_atom.append(token)
  1396. token, value = get_dot_atom_text(value)
  1397. dot_atom.append(token)
  1398. if value and value[0] in CFWS_LEADER:
  1399. token, value = get_cfws(value)
  1400. dot_atom.append(token)
  1401. return dot_atom, value
  1402. def get_word(value):
  1403. """word = atom / quoted-string
  1404. Either atom or quoted-string may start with CFWS. We have to peel off this
  1405. CFWS first to determine which type of word to parse. Afterward we splice
  1406. the leading CFWS, if any, into the parsed sub-token.
  1407. If neither an atom or a quoted-string is found before the next special, a
  1408. HeaderParseError is raised.
  1409. The token returned is either an Atom or a QuotedString, as appropriate.
  1410. This means the 'word' level of the formal grammar is not represented in the
  1411. parse tree; this is because having that extra layer when manipulating the
  1412. parse tree is more confusing than it is helpful.
  1413. """
  1414. if value[0] in CFWS_LEADER:
  1415. leader, value = get_cfws(value)
  1416. else:
  1417. leader = None
  1418. if value[0]=='"':
  1419. token, value = get_quoted_string(value)
  1420. elif value[0] in SPECIALS:
  1421. raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
  1422. "but found '{}'".format(value))
  1423. else:
  1424. token, value = get_atom(value)
  1425. if leader is not None:
  1426. token[:0] = [leader]
  1427. return token, value
  1428. def get_phrase(value):
  1429. """ phrase = 1*word / obs-phrase
  1430. obs-phrase = word *(word / "." / CFWS)
  1431. This means a phrase can be a sequence of words, periods, and CFWS in any
  1432. order as long as it starts with at least one word. If anything other than
  1433. words is detected, an ObsoleteHeaderDefect is added to the token's defect
  1434. list. We also accept a phrase that starts with CFWS followed by a dot;
  1435. this is registered as an InvalidHeaderDefect, since it is not supported by
  1436. even the obsolete grammar.
  1437. """
  1438. phrase = Phrase()
  1439. try:
  1440. token, value = get_word(value)
  1441. phrase.append(token)
  1442. except errors.HeaderParseError:
  1443. phrase.defects.append(errors.InvalidHeaderDefect(
  1444. "phrase does not start with word"))
  1445. while value and value[0] not in PHRASE_ENDS:
  1446. if value[0]=='.':
  1447. phrase.append(DOT)
  1448. phrase.defects.append(errors.ObsoleteHeaderDefect(
  1449. "period in 'phrase'"))
  1450. value = value[1:]
  1451. else:
  1452. try:
  1453. token, value = get_word(value)
  1454. except errors.HeaderParseError:
  1455. if value[0] in CFWS_LEADER:
  1456. token, value = get_cfws(value)
  1457. phrase.defects.append(errors.ObsoleteHeaderDefect(
  1458. "comment found without atom"))
  1459. else:
  1460. raise
  1461. phrase.append(token)
  1462. return phrase, value
  1463. def get_local_part(value):
  1464. """ local-part = dot-atom / quoted-string / obs-local-part
  1465. """
  1466. local_part = LocalPart()
  1467. leader = None
  1468. if value[0] in CFWS_LEADER:
  1469. leader, value = get_cfws(value)
  1470. if not value:
  1471. raise errors.HeaderParseError(
  1472. "expected local-part but found '{}'".format(value))
  1473. try:
  1474. token, value = get_dot_atom(value)
  1475. except errors.HeaderParseError:
  1476. try:
  1477. token, value = get_word(value)
  1478. except errors.HeaderParseError:
  1479. if value[0] != '\\' and value[0] in PHRASE_ENDS:
  1480. raise
  1481. token = TokenList()
  1482. if leader is not None:
  1483. token[:0] = [leader]
  1484. local_part.append(token)
  1485. if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
  1486. obs_local_part, value = get_obs_local_part(str(local_part) + value)
  1487. if obs_local_part.token_type == 'invalid-obs-local-part':
  1488. local_part.defects.append(errors.InvalidHeaderDefect(
  1489. "local-part is not dot-atom, quoted-string, or obs-local-part"))
  1490. else:
  1491. local_part.defects.append(errors.ObsoleteHeaderDefect(
  1492. "local-part is not a dot-atom (contains CFWS)"))
  1493. local_part[0] = obs_local_part
  1494. try:
  1495. local_part.value.encode('ascii')
  1496. except UnicodeEncodeError:
  1497. local_part.defects.append(errors.NonASCIILocalPartDefect(
  1498. "local-part contains non-ASCII characters)"))
  1499. return local_part, value
  1500. def get_obs_local_part(value):
  1501. """ obs-local-part = word *("." word)
  1502. """
  1503. obs_local_part = ObsLocalPart()
  1504. last_non_ws_was_dot = False
  1505. while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
  1506. if value[0] == '.':
  1507. if last_non_ws_was_dot:
  1508. obs_local_part.defects.append(errors.InvalidHeaderDefect(
  1509. "invalid repeated '.'"))
  1510. obs_local_part.append(DOT)
  1511. last_non_ws_was_dot = True
  1512. value = value[1:]
  1513. continue
  1514. elif value[0]=='\\':
  1515. obs_local_part.append(ValueTerminal(value[0],
  1516. 'misplaced-special'))
  1517. value = value[1:]
  1518. obs_local_part.defects.append(errors.InvalidHeaderDefect(
  1519. "'\\' character outside of quoted-string/ccontent"))
  1520. last_non_ws_was_dot = False
  1521. continue
  1522. if obs_local_part and obs_local_part[-1].token_type != 'dot':
  1523. obs_local_part.defects.append(errors.InvalidHeaderDefect(
  1524. "missing '.' between words"))
  1525. try:
  1526. token, value = get_word(value)
  1527. last_non_ws_was_dot = False
  1528. except errors.HeaderParseError:
  1529. if value[0] not in CFWS_LEADER:
  1530. raise
  1531. token, value = get_cfws(value)
  1532. obs_local_part.append(token)
  1533. if (obs_local_part[0].token_type == 'dot' or
  1534. obs_local_part[0].token_type=='cfws' and
  1535. obs_local_part[1].token_type=='dot'):
  1536. obs_local_part.defects.append(errors.InvalidHeaderDefect(
  1537. "Invalid leading '.' in local part"))
  1538. if (obs_local_part[-1].token_type == 'dot' or
  1539. obs_local_part[-1].token_type=='cfws' and
  1540. obs_local_part[-2].token_type=='dot'):
  1541. obs_local_part.defects.append(errors.InvalidHeaderDefect(
  1542. "Invalid trailing '.' in local part"))
  1543. if obs_local_part.defects:
  1544. obs_local_part.token_type = 'invalid-obs-local-part'
  1545. return obs_local_part, value
  1546. def get_dtext(value):
  1547. """ dtext = <printable ascii except \ [ ]> / obs-dtext
  1548. obs-dtext = obs-NO-WS-CTL / quoted-pair
  1549. We allow anything except the excluded characters, but if we find any
  1550. ASCII other than the RFC defined printable ASCII an NonPrintableDefect is
  1551. added to the token's defects list. Quoted pairs are converted to their
  1552. unquoted values, so what is returned is a ptext token, in this case a
  1553. ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
  1554. added to the returned token's defect list.
  1555. """
  1556. ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
  1557. ptext = ValueTerminal(ptext, 'ptext')
  1558. if had_qp:
  1559. ptext.defects.append(errors.ObsoleteHeaderDefect(
  1560. "quoted printable found in domain-literal"))
  1561. _validate_xtext(ptext)
  1562. return ptext, value
  1563. def _check_for_early_dl_end(value, domain_literal):
  1564. if value:
  1565. return False
  1566. domain_literal.append(errors.InvalidHeaderDefect(
  1567. "end of input inside domain-literal"))
  1568. domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
  1569. return True
  1570. def get_domain_literal(value):
  1571. """ domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
  1572. """
  1573. domain_literal = DomainLiteral()
  1574. if value[0] in CFWS_LEADER:
  1575. token, value = get_cfws(value)
  1576. domain_literal.append(token)
  1577. if not value:
  1578. raise errors.HeaderParseError("expected domain-literal")
  1579. if value[0] != '[':
  1580. raise errors.HeaderParseError("expected '[' at start of domain-literal "
  1581. "but found '{}'".format(value))
  1582. value = value[1:]
  1583. if _check_for_early_dl_end(value, domain_literal):
  1584. return domain_literal, value
  1585. domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
  1586. if value[0] in WSP:
  1587. token, value = get_fws(value)
  1588. domain_literal.append(token)
  1589. token, value = get_dtext(value)
  1590. domain_literal.append(token)
  1591. if _check_for_early_dl_end(value, domain_literal):
  1592. return domain_literal, value
  1593. if value[0] in WSP:
  1594. token, value = get_fws(value)
  1595. domain_literal.append(token)
  1596. if _check_for_early_dl_end(value, domain_literal):
  1597. return domain_literal, value
  1598. if value[0] != ']':
  1599. raise errors.HeaderParseError("expected ']' at end of domain-literal "
  1600. "but found '{}'".format(value))
  1601. domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
  1602. value = value[1:]
  1603. if value and value[0] in CFWS_LEADER:
  1604. token, value = get_cfws(value)
  1605. domain_literal.append(token)
  1606. return domain_literal, value
  1607. def get_domain(value):
  1608. """ domain = dot-atom / domain-literal / obs-domain
  1609. obs-domain = atom *("." atom))
  1610. """
  1611. domain = Domain()
  1612. leader = None
  1613. if value[0] in CFWS_LEADER:
  1614. leader, value = get_cfws(value)
  1615. if not value:
  1616. raise errors.HeaderParseError(
  1617. "expected domain but found '{}'".format(value))
  1618. if value[0] == '[':
  1619. token, value = get_domain_literal(value)
  1620. if leader is not None:
  1621. token[:0] = [leader]
  1622. domain.append(token)
  1623. return domain, value
  1624. try:
  1625. token, value = get_dot_atom(value)
  1626. except errors.HeaderParseError:
  1627. token, value = get_atom(value)
  1628. if leader is not None:
  1629. token[:0] = [leader]
  1630. domain.append(token)
  1631. if value and value[0] == '.':
  1632. domain.defects.append(errors.ObsoleteHeaderDefect(
  1633. "domain is not a dot-atom (contains CFWS)"))
  1634. if domain[0].token_type == 'dot-atom':
  1635. domain[:] = domain[0]
  1636. while value and value[0] == '.':
  1637. domain.append(DOT)
  1638. token, value = get_atom(value[1:])
  1639. domain.append(token)
  1640. return domain, value
  1641. def get_addr_spec(value):
  1642. """ addr-spec = local-part "@" domain
  1643. """
  1644. addr_spec = AddrSpec()
  1645. token, value = get_local_part(value)
  1646. addr_spec.append(token)
  1647. if not value or value[0] != '@':
  1648. addr_spec.defects.append(errors.InvalidHeaderDefect(
  1649. "add-spec local part with no domain"))
  1650. return addr_spec, value
  1651. addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
  1652. token, value = get_domain(value[1:])
  1653. addr_spec.append(token)
  1654. return addr_spec, value
  1655. def get_obs_route(value):
  1656. """ obs-route = obs-domain-list ":"
  1657. obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
  1658. Returns an obs-route token with the appropriate sub-tokens (that is,
  1659. there is no obs-domain-list in the parse tree).
  1660. """
  1661. obs_route = ObsRoute()
  1662. while value and (value[0]==',' or value[0] in CFWS_LEADER):
  1663. if value[0] in CFWS_LEADER:
  1664. token, value = get_cfws(value)
  1665. obs_route.append(token)
  1666. elif value[0] == ',':
  1667. obs_route.append(ListSeparator)
  1668. value = value[1:]
  1669. if not value or value[0] != '@':
  1670. raise errors.HeaderParseError(
  1671. "expected obs-route domain but found '{}'".format(value))
  1672. obs_route.append(RouteComponentMarker)
  1673. token, value = get_domain(value[1:])
  1674. obs_route.append(token)
  1675. while value and value[0]==',':
  1676. obs_route.append(ListSeparator)
  1677. value = value[1:]
  1678. if not value:
  1679. break
  1680. if value[0] in CFWS_LEADER:
  1681. token, value = get_cfws(value)
  1682. obs_route.append(token)
  1683. if value[0] == '@':
  1684. obs_route.append(RouteComponentMarker)
  1685. token, value = get_domain(value[1:])
  1686. obs_route.append(token)
  1687. if not value:
  1688. raise errors.HeaderParseError("end of header while parsing obs-route")
  1689. if value[0] != ':':
  1690. raise errors.HeaderParseError( "expected ':' marking end of "
  1691. "obs-route but found '{}'".format(value))
  1692. obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
  1693. return obs_route, value[1:]
  1694. def get_angle_addr(value):
  1695. """ angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
  1696. obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
  1697. """
  1698. angle_addr = AngleAddr()
  1699. if value[0] in CFWS_LEADER:
  1700. token, value = get_cfws(value)
  1701. angle_addr.append(token)
  1702. if not value or value[0] != '<':
  1703. raise errors.HeaderParseError(
  1704. "expected angle-addr but found '{}'".format(value))
  1705. angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
  1706. value = value[1:]
  1707. # Although it is not legal per RFC5322, SMTP uses '<>' in certain
  1708. # circumstances.
  1709. if value[0] == '>':
  1710. angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
  1711. angle_addr.defects.append(errors.InvalidHeaderDefect(
  1712. "null addr-spec in angle-addr"))
  1713. value = value[1:]
  1714. return angle_addr, value
  1715. try:
  1716. token, value = get_addr_spec(value)
  1717. except errors.HeaderParseError:
  1718. try:
  1719. token, value = get_obs_route(value)
  1720. angle_addr.defects.append(errors.ObsoleteHeaderDefect(
  1721. "obsolete route specification in angle-addr"))
  1722. except errors.HeaderParseError:
  1723. raise errors.HeaderParseError(
  1724. "expected addr-spec or obs-route but found '{}'".format(value))
  1725. angle_addr.append(token)
  1726. token, value = get_addr_spec(value)
  1727. angle_addr.append(token)
  1728. if value and value[0] == '>':
  1729. value = value[1:]
  1730. else:
  1731. angle_addr.defects.append(errors.InvalidHeaderDefect(
  1732. "missing trailing '>' on angle-addr"))
  1733. angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
  1734. if value and value[0] in CFWS_LEADER:
  1735. token, value = get_cfws(value)
  1736. angle_addr.append(token)
  1737. return angle_addr, value
  1738. def get_display_name(value):
  1739. """ display-name = phrase
  1740. Because this is simply a name-rule, we don't return a display-name
  1741. token containing a phrase, but rather a display-name token with
  1742. the content of the phrase.
  1743. """
  1744. display_name = DisplayName()
  1745. token, value = get_phrase(value)
  1746. display_name.extend(token[:])
  1747. display_name.defects = token.defects[:]
  1748. return display_name, value
  1749. def get_name_addr(value):
  1750. """ name-addr = [display-name] angle-addr
  1751. """
  1752. name_addr = NameAddr()
  1753. # Both the optional display name and the angle-addr can start with cfws.
  1754. leader = None
  1755. if value[0] in CFWS_LEADER:
  1756. leader, value = get_cfws(value)
  1757. if not value:
  1758. raise errors.HeaderParseError(
  1759. "expected name-addr but found '{}'".format(leader))
  1760. if value[0] != '<':
  1761. if value[0] in PHRASE_ENDS:
  1762. raise errors.HeaderParseError(
  1763. "expected name-addr but found '{}'".format(value))
  1764. token, value = get_display_name(value)
  1765. if not value:
  1766. raise errors.HeaderParseError(
  1767. "expected name-addr but found '{}'".format(token))
  1768. if leader is not None:
  1769. token[0][:0] = [leader]
  1770. leader = None
  1771. name_addr.append(token)
  1772. token, value = get_angle_addr(value)
  1773. if leader is not None:
  1774. token[:0] = [leader]
  1775. name_addr.append(token)
  1776. return name_addr, value
  1777. def get_mailbox(value):
  1778. """ mailbox = name-addr / addr-spec
  1779. """
  1780. # The only way to figure out if we are dealing with a name-addr or an
  1781. # addr-spec is to try parsing each one.
  1782. mailbox = Mailbox()
  1783. try:
  1784. token, value = get_name_addr(value)
  1785. except errors.HeaderParseError:
  1786. try:
  1787. token, value = get_addr_spec(value)
  1788. except errors.HeaderParseError:
  1789. raise errors.HeaderParseError(
  1790. "expected mailbox but found '{}'".format(value))
  1791. if any(isinstance(x, errors.InvalidHeaderDefect)
  1792. for x in token.all_defects):
  1793. mailbox.token_type = 'invalid-mailbox'
  1794. mailbox.append(token)
  1795. return mailbox, value
  1796. def get_invalid_mailbox(value, endchars):
  1797. """ Read everything up to one of the chars in endchars.
  1798. This is outside the formal grammar. The InvalidMailbox TokenList that is
  1799. returned acts like a Mailbox, but the data attributes are None.
  1800. """
  1801. invalid_mailbox = InvalidMailbox()
  1802. while value and value[0] not in endchars:
  1803. if value[0] in PHRASE_ENDS:
  1804. invalid_mailbox.append(ValueTerminal(value[0],
  1805. 'misplaced-special'))
  1806. value = value[1:]
  1807. else:
  1808. token, value = get_phrase(value)
  1809. invalid_mailbox.append(token)
  1810. return invalid_mailbox, value
  1811. def get_mailbox_list(value):
  1812. """ mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
  1813. obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
  1814. For this routine we go outside the formal grammar in order to improve error
  1815. handling. We recognize the end of the mailbox list only at the end of the
  1816. value or at a ';' (the group terminator). This is so that we can turn
  1817. invalid mailboxes into InvalidMailbox tokens and continue parsing any
  1818. remaining valid mailboxes. We also allow all mailbox entries to be null,
  1819. and this condition is handled appropriately at a higher level.
  1820. """
  1821. mailbox_list = MailboxList()
  1822. while value and value[0] != ';':
  1823. try:
  1824. token, value = get_mailbox(value)
  1825. mailbox_list.append(token)
  1826. except errors.HeaderParseError:
  1827. leader = None
  1828. if value[0] in CFWS_LEADER:
  1829. leader, value = get_cfws(value)
  1830. if not value or value[0] in ',;':
  1831. mailbox_list.append(leader)
  1832. mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
  1833. "empty element in mailbox-list"))
  1834. else:
  1835. token, value = get_invalid_mailbox(value, ',;')
  1836. if leader is not None:
  1837. token[:0] = [leader]
  1838. mailbox_list.append(token)
  1839. mailbox_list.defects.append(errors.InvalidHeaderDefect(
  1840. "invalid mailbox in mailbox-list"))
  1841. elif value[0] == ',':
  1842. mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
  1843. "empty element in mailbox-list"))
  1844. else:
  1845. token, value = get_invalid_mailbox(value, ',;')
  1846. if leader is not None:
  1847. token[:0] = [leader]
  1848. mailbox_list.append(token)
  1849. mailbox_list.defects.append(errors.InvalidHeaderDefect(
  1850. "invalid mailbox in mailbox-list"))
  1851. if value and value[0] not in ',;':
  1852. # Crap after mailbox; treat it as an invalid mailbox.
  1853. # The mailbox info will still be available.
  1854. mailbox = mailbox_list[-1]
  1855. mailbox.token_type = 'invalid-mailbox'
  1856. token, value = get_invalid_mailbox(value, ',;')
  1857. mailbox.extend(token)
  1858. mailbox_list.defects.append(errors.InvalidHeaderDefect(
  1859. "invalid mailbox in mailbox-list"))
  1860. if value and value[0] == ',':
  1861. mailbox_list.append(ListSeparator)
  1862. value = value[1:]
  1863. return mailbox_list, value
  1864. def get_group_list(value):
  1865. """ group-list = mailbox-list / CFWS / obs-group-list
  1866. obs-group-list = 1*([CFWS] ",") [CFWS]
  1867. """
  1868. group_list = GroupList()
  1869. if not value:
  1870. group_list.defects.append(errors.InvalidHeaderDefect(
  1871. "end of header before group-list"))
  1872. return group_list, value
  1873. leader = None
  1874. if value and value[0] in CFWS_LEADER:
  1875. leader, value = get_cfws(value)
  1876. if not value:
  1877. # This should never happen in email parsing, since CFWS-only is a
  1878. # legal alternative to group-list in a group, which is the only
  1879. # place group-list appears.
  1880. group_list.defects.append(errors.InvalidHeaderDefect(
  1881. "end of header in group-list"))
  1882. group_list.append(leader)
  1883. return group_list, value
  1884. if value[0] == ';':
  1885. group_list.append(leader)
  1886. return group_list, value
  1887. token, value = get_mailbox_list(value)
  1888. if len(token.all_mailboxes)==0:
  1889. if leader is not None:
  1890. group_list.append(leader)
  1891. group_list.extend(token)
  1892. group_list.defects.append(errors.ObsoleteHeaderDefect(
  1893. "group-list with empty entries"))
  1894. return group_list, value
  1895. if leader is not None:
  1896. token[:0] = [leader]
  1897. group_list.append(token)
  1898. return group_list, value
  1899. def get_group(value):
  1900. """ group = display-name ":" [group-list] ";" [CFWS]
  1901. """
  1902. group = Group()
  1903. token, value = get_display_name(value)
  1904. if not value or value[0] != ':':
  1905. raise errors.HeaderParseError("expected ':' at end of group "
  1906. "display name but found '{}'".format(value))
  1907. group.append(token)
  1908. group.append(ValueTerminal(':', 'group-display-name-terminator'))
  1909. value = value[1:]
  1910. if value and value[0] == ';':
  1911. group.append(ValueTerminal(';', 'group-terminator'))
  1912. return group, value[1:]
  1913. token, value = get_group_list(value)
  1914. group.append(token)
  1915. if not value:
  1916. group.defects.append(errors.InvalidHeaderDefect(
  1917. "end of header in group"))
  1918. if value[0] != ';':
  1919. raise errors.HeaderParseError(
  1920. "expected ';' at end of group but found {}".format(value))
  1921. group.append(ValueTerminal(';', 'group-terminator'))
  1922. value = value[1:]
  1923. if value and value[0] in CFWS_LEADER:
  1924. token, value = get_cfws(value)
  1925. group.append(token)
  1926. return group, value
  1927. def get_address(value):
  1928. """ address = mailbox / group
  1929. Note that counter-intuitively, an address can be either a single address or
  1930. a list of addresses (a group). This is why the returned Address object has
  1931. a 'mailboxes' attribute which treats a single address as a list of length
  1932. one. When you need to differentiate between to two cases, extract the single
  1933. element, which is either a mailbox or a group token.
  1934. """
  1935. # The formal grammar isn't very helpful when parsing an address. mailbox
  1936. # and group, especially when allowing for obsolete forms, start off very
  1937. # similarly. It is only when you reach one of @, <, or : that you know
  1938. # what you've got. So, we try each one in turn, starting with the more
  1939. # likely of the two. We could perhaps make this more efficient by looking
  1940. # for a phrase and then branching based on the next character, but that
  1941. # would be a premature optimization.
  1942. address = Address()
  1943. try:
  1944. token, value = get_group(value)
  1945. except errors.HeaderParseError:
  1946. try:
  1947. token, value = get_mailbox(value)
  1948. except errors.HeaderParseError:
  1949. raise errors.HeaderParseError(
  1950. "expected address but found '{}'".format(value))
  1951. address.append(token)
  1952. return address, value
  1953. def get_address_list(value):
  1954. """ address_list = (address *("," address)) / obs-addr-list
  1955. obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
  1956. We depart from the formal grammar here by continuing to parse until the end
  1957. of the input, assuming the input to be entirely composed of an
  1958. address-list. This is always true in email parsing, and allows us
  1959. to skip invalid addresses to parse additional valid ones.
  1960. """
  1961. address_list = AddressList()
  1962. while value:
  1963. try:
  1964. token, value = get_address(value)
  1965. address_list.append(token)
  1966. except errors.HeaderParseError as err:
  1967. leader = None
  1968. if value[0] in CFWS_LEADER:
  1969. leader, value = get_cfws(value)
  1970. if not value or value[0] == ',':
  1971. address_list.append(leader)
  1972. address_list.defects.append(errors.ObsoleteHeaderDefect(
  1973. "address-list entry with no content"))
  1974. else:
  1975. token, value = get_invalid_mailbox(value, ',')
  1976. if leader is not None:
  1977. token[:0] = [leader]
  1978. address_list.append(Address([token]))
  1979. address_list.defects.append(errors.InvalidHeaderDefect(
  1980. "invalid address in address-list"))
  1981. elif value[0] == ',':
  1982. address_list.defects.append(errors.ObsoleteHeaderDefect(
  1983. "empty element in address-list"))
  1984. else:
  1985. token, value = get_invalid_mailbox(value, ',')
  1986. if leader is not None:
  1987. token[:0] = [leader]
  1988. address_list.append(Address([token]))
  1989. address_list.defects.append(errors.InvalidHeaderDefect(
  1990. "invalid address in address-list"))
  1991. if value and value[0] != ',':
  1992. # Crap after address; treat it as an invalid mailbox.
  1993. # The mailbox info will still be available.
  1994. mailbox = address_list[-1][0]
  1995. mailbox.token_type = 'invalid-mailbox'
  1996. token, value = get_invalid_mailbox(value, ',')
  1997. mailbox.extend(token)
  1998. address_list.defects.append(errors.InvalidHeaderDefect(
  1999. "invalid address in address-list"))
  2000. if value: # Must be a , at this point.
  2001. address_list.append(ValueTerminal(',', 'list-separator'))
  2002. value = value[1:]
  2003. return address_list, value
  2004. #
  2005. # XXX: As I begin to add additional header parsers, I'm realizing we probably
  2006. # have two level of parser routines: the get_XXX methods that get a token in
  2007. # the grammar, and parse_XXX methods that parse an entire field value. So
  2008. # get_address_list above should really be a parse_ method, as probably should
  2009. # be get_unstructured.
  2010. #
  2011. def parse_mime_version(value):
  2012. """ mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
  2013. """
  2014. # The [CFWS] is implicit in the RFC 2045 BNF.
  2015. # XXX: This routine is a bit verbose, should factor out a get_int method.
  2016. mime_version = MIMEVersion()
  2017. if not value:
  2018. mime_version.defects.append(errors.HeaderMissingRequiredValue(
  2019. "Missing MIME version number (eg: 1.0)"))
  2020. return mime_version
  2021. if value[0] in CFWS_LEADER:
  2022. token, value = get_cfws(value)
  2023. mime_version.append(token)
  2024. if not value:
  2025. mime_version.defects.append(errors.HeaderMissingRequiredValue(
  2026. "Expected MIME version number but found only CFWS"))
  2027. digits = ''
  2028. while value and value[0] != '.' and value[0] not in CFWS_LEADER:
  2029. digits += value[0]
  2030. value = value[1:]
  2031. if not digits.isdigit():
  2032. mime_version.defects.append(errors.InvalidHeaderDefect(
  2033. "Expected MIME major version number but found {!r}".format(digits)))
  2034. mime_version.append(ValueTerminal(digits, 'xtext'))
  2035. else:
  2036. mime_version.major = int(digits)
  2037. mime_version.append(ValueTerminal(digits, 'digits'))
  2038. if value and value[0] in CFWS_LEADER:
  2039. token, value = get_cfws(value)
  2040. mime_version.append(token)
  2041. if not value or value[0] != '.':
  2042. if mime_version.major is not None:
  2043. mime_version.defects.append(errors.InvalidHeaderDefect(
  2044. "Incomplete MIME version; found only major number"))
  2045. if value:
  2046. mime_version.append(ValueTerminal(value, 'xtext'))
  2047. return mime_version
  2048. mime_version.append(ValueTerminal('.', 'version-separator'))
  2049. value = value[1:]
  2050. if value and value[0] in CFWS_LEADER:
  2051. token, value = get_cfws(value)
  2052. mime_version.append(token)
  2053. if not value:
  2054. if mime_version.major is not None:
  2055. mime_version.defects.append(errors.InvalidHeaderDefect(
  2056. "Incomplete MIME version; found only major number"))
  2057. return mime_version
  2058. digits = ''
  2059. while value and value[0] not in CFWS_LEADER:
  2060. digits += value[0]
  2061. value = value[1:]
  2062. if not digits.isdigit():
  2063. mime_version.defects.append(errors.InvalidHeaderDefect(
  2064. "Expected MIME minor version number but found {!r}".format(digits)))
  2065. mime_version.append(ValueTerminal(digits, 'xtext'))
  2066. else:
  2067. mime_version.minor = int(digits)
  2068. mime_version.append(ValueTerminal(digits, 'digits'))
  2069. if value and value[0] in CFWS_LEADER:
  2070. token, value = get_cfws(value)
  2071. mime_version.append(token)
  2072. if value:
  2073. mime_version.defects.append(errors.InvalidHeaderDefect(
  2074. "Excess non-CFWS text after MIME version"))
  2075. mime_version.append(ValueTerminal(value, 'xtext'))
  2076. return mime_version
  2077. def get_invalid_parameter(value):
  2078. """ Read everything up to the next ';'.
  2079. This is outside the formal grammar. The InvalidParameter TokenList that is
  2080. returned acts like a Parameter, but the data attributes are None.
  2081. """
  2082. invalid_parameter = InvalidParameter()
  2083. while value and value[0] != ';':
  2084. if value[0] in PHRASE_ENDS:
  2085. invalid_parameter.append(ValueTerminal(value[0],
  2086. 'misplaced-special'))
  2087. value = value[1:]
  2088. else:
  2089. token, value = get_phrase(value)
  2090. invalid_parameter.append(token)
  2091. return invalid_parameter, value
  2092. def get_ttext(value):
  2093. """ttext = <matches _ttext_matcher>
  2094. We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
  2095. defects list if we find non-ttext characters. We also register defects for
  2096. *any* non-printables even though the RFC doesn't exclude all of them,
  2097. because we follow the spirit of RFC 5322.
  2098. """
  2099. m = _non_token_end_matcher(value)
  2100. if not m:
  2101. raise errors.HeaderParseError(
  2102. "expected ttext but found '{}'".format(value))
  2103. ttext = m.group()
  2104. value = value[len(ttext):]
  2105. ttext = ValueTerminal(ttext, 'ttext')
  2106. _validate_xtext(ttext)
  2107. return ttext, value
  2108. def get_token(value):
  2109. """token = [CFWS] 1*ttext [CFWS]
  2110. The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
  2111. tspecials. We also exclude tabs even though the RFC doesn't.
  2112. The RFC implies the CFWS but is not explicit about it in the BNF.
  2113. """
  2114. mtoken = Token()
  2115. if value and value[0] in CFWS_LEADER:
  2116. token, value = get_cfws(value)
  2117. mtoken.append(token)
  2118. if value and value[0] in TOKEN_ENDS:
  2119. raise errors.HeaderParseError(
  2120. "expected token but found '{}'".format(value))
  2121. token, value = get_ttext(value)
  2122. mtoken.append(token)
  2123. if value and value[0] in CFWS_LEADER:
  2124. token, value = get_cfws(value)
  2125. mtoken.append(token)
  2126. return mtoken, value
  2127. def get_attrtext(value):
  2128. """attrtext = 1*(any non-ATTRIBUTE_ENDS character)
  2129. We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
  2130. token's defects list if we find non-attrtext characters. We also register
  2131. defects for *any* non-printables even though the RFC doesn't exclude all of
  2132. them, because we follow the spirit of RFC 5322.
  2133. """
  2134. m = _non_attribute_end_matcher(value)
  2135. if not m:
  2136. raise errors.HeaderParseError(
  2137. "expected attrtext but found {!r}".format(value))
  2138. attrtext = m.group()
  2139. value = value[len(attrtext):]
  2140. attrtext = ValueTerminal(attrtext, 'attrtext')
  2141. _validate_xtext(attrtext)
  2142. return attrtext, value
  2143. def get_attribute(value):
  2144. """ [CFWS] 1*attrtext [CFWS]
  2145. This version of the BNF makes the CFWS explicit, and as usual we use a
  2146. value terminal for the actual run of characters. The RFC equivalent of
  2147. attrtext is the token characters, with the subtraction of '*', "'", and '%'.
  2148. We include tab in the excluded set just as we do for token.
  2149. """
  2150. attribute = Attribute()
  2151. if value and value[0] in CFWS_LEADER:
  2152. token, value = get_cfws(value)
  2153. attribute.append(token)
  2154. if value and value[0] in ATTRIBUTE_ENDS:
  2155. raise errors.HeaderParseError(
  2156. "expected token but found '{}'".format(value))
  2157. token, value = get_attrtext(value)
  2158. attribute.append(token)
  2159. if value and value[0] in CFWS_LEADER:
  2160. token, value = get_cfws(value)
  2161. attribute.append(token)
  2162. return attribute, value
  2163. def get_extended_attrtext(value):
  2164. """attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
  2165. This is a special parsing routine so that we get a value that
  2166. includes % escapes as a single string (which we decode as a single
  2167. string later).
  2168. """
  2169. m = _non_extended_attribute_end_matcher(value)
  2170. if not m:
  2171. raise errors.HeaderParseError(
  2172. "expected extended attrtext but found {!r}".format(value))
  2173. attrtext = m.group()
  2174. value = value[len(attrtext):]
  2175. attrtext = ValueTerminal(attrtext, 'extended-attrtext')
  2176. _validate_xtext(attrtext)
  2177. return attrtext, value
  2178. def get_extended_attribute(value):
  2179. """ [CFWS] 1*extended_attrtext [CFWS]
  2180. This is like the non-extended version except we allow % characters, so that
  2181. we can pick up an encoded value as a single string.
  2182. """
  2183. # XXX: should we have an ExtendedAttribute TokenList?
  2184. attribute = Attribute()
  2185. if value and value[0] in CFWS_LEADER:
  2186. token, value = get_cfws(value)
  2187. attribute.append(token)
  2188. if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
  2189. raise errors.HeaderParseError(
  2190. "expected token but found '{}'".format(value))
  2191. token, value = get_extended_attrtext(value)
  2192. attribute.append(token)
  2193. if value and value[0] in CFWS_LEADER:
  2194. token, value = get_cfws(value)
  2195. attribute.append(token)
  2196. return attribute, value
  2197. def get_section(value):
  2198. """ '*' digits
  2199. The formal BNF is more complicated because leading 0s are not allowed. We
  2200. check for that and add a defect. We also assume no CFWS is allowed between
  2201. the '*' and the digits, though the RFC is not crystal clear on that.
  2202. The caller should already have dealt with leading CFWS.
  2203. """
  2204. section = Section()
  2205. if not value or value[0] != '*':
  2206. raise errors.HeaderParseError("Expected section but found {}".format(
  2207. value))
  2208. section.append(ValueTerminal('*', 'section-marker'))
  2209. value = value[1:]
  2210. if not value or not value[0].isdigit():
  2211. raise errors.HeaderParseError("Expected section number but "
  2212. "found {}".format(value))
  2213. digits = ''
  2214. while value and value[0].isdigit():
  2215. digits += value[0]
  2216. value = value[1:]
  2217. if digits[0] == '0' and digits != '0':
  2218. section.defects.append(errors.InvalidHeaderError("section number"
  2219. "has an invalid leading 0"))
  2220. section.number = int(digits)
  2221. section.append(ValueTerminal(digits, 'digits'))
  2222. return section, value
  2223. def get_value(value):
  2224. """ quoted-string / attribute
  2225. """
  2226. v = Value()
  2227. if not value:
  2228. raise errors.HeaderParseError("Expected value but found end of string")
  2229. leader = None
  2230. if value[0] in CFWS_LEADER:
  2231. leader, value = get_cfws(value)
  2232. if not value:
  2233. raise errors.HeaderParseError("Expected value but found "
  2234. "only {}".format(leader))
  2235. if value[0] == '"':
  2236. token, value = get_quoted_string(value)
  2237. else:
  2238. token, value = get_extended_attribute(value)
  2239. if leader is not None:
  2240. token[:0] = [leader]
  2241. v.append(token)
  2242. return v, value
  2243. def get_parameter(value):
  2244. """ attribute [section] ["*"] [CFWS] "=" value
  2245. The CFWS is implied by the RFC but not made explicit in the BNF. This
  2246. simplified form of the BNF from the RFC is made to conform with the RFC BNF
  2247. through some extra checks. We do it this way because it makes both error
  2248. recovery and working with the resulting parse tree easier.
  2249. """
  2250. # It is possible CFWS would also be implicitly allowed between the section
  2251. # and the 'extended-attribute' marker (the '*') , but we've never seen that
  2252. # in the wild and we will therefore ignore the possibility.
  2253. param = Parameter()
  2254. token, value = get_attribute(value)
  2255. param.append(token)
  2256. if not value or value[0] == ';':
  2257. param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
  2258. "name ({}) but no value".format(token)))
  2259. return param, value
  2260. if value[0] == '*':
  2261. try:
  2262. token, value = get_section(value)
  2263. param.sectioned = True
  2264. param.append(token)
  2265. except errors.HeaderParseError:
  2266. pass
  2267. if not value:
  2268. raise errors.HeaderParseError("Incomplete parameter")
  2269. if value[0] == '*':
  2270. param.append(ValueTerminal('*', 'extended-parameter-marker'))
  2271. value = value[1:]
  2272. param.extended = True
  2273. if value[0] != '=':
  2274. raise errors.HeaderParseError("Parameter not followed by '='")
  2275. param.append(ValueTerminal('=', 'parameter-separator'))
  2276. value = value[1:]
  2277. leader = None
  2278. if value and value[0] in CFWS_LEADER:
  2279. token, value = get_cfws(value)
  2280. param.append(token)
  2281. remainder = None
  2282. appendto = param
  2283. if param.extended and value and value[0] == '"':
  2284. # Now for some serious hackery to handle the common invalid case of
  2285. # double quotes around an extended value. We also accept (with defect)
  2286. # a value marked as encoded that isn't really.
  2287. qstring, remainder = get_quoted_string(value)
  2288. inner_value = qstring.stripped_value
  2289. semi_valid = False
  2290. if param.section_number == 0:
  2291. if inner_value and inner_value[0] == "'":
  2292. semi_valid = True
  2293. else:
  2294. token, rest = get_attrtext(inner_value)
  2295. if rest and rest[0] == "'":
  2296. semi_valid = True
  2297. else:
  2298. try:
  2299. token, rest = get_extended_attrtext(inner_value)
  2300. except:
  2301. pass
  2302. else:
  2303. if not rest:
  2304. semi_valid = True
  2305. if semi_valid:
  2306. param.defects.append(errors.InvalidHeaderDefect(
  2307. "Quoted string value for extended parameter is invalid"))
  2308. param.append(qstring)
  2309. for t in qstring:
  2310. if t.token_type == 'bare-quoted-string':
  2311. t[:] = []
  2312. appendto = t
  2313. break
  2314. value = inner_value
  2315. else:
  2316. remainder = None
  2317. param.defects.append(errors.InvalidHeaderDefect(
  2318. "Parameter marked as extended but appears to have a "
  2319. "quoted string value that is non-encoded"))
  2320. if value and value[0] == "'":
  2321. token = None
  2322. else:
  2323. token, value = get_value(value)
  2324. if not param.extended or param.section_number > 0:
  2325. if not value or value[0] != "'":
  2326. appendto.append(token)
  2327. if remainder is not None:
  2328. assert not value, value
  2329. value = remainder
  2330. return param, value
  2331. param.defects.append(errors.InvalidHeaderDefect(
  2332. "Apparent initial-extended-value but attribute "
  2333. "was not marked as extended or was not initial section"))
  2334. if not value:
  2335. # Assume the charset/lang is missing and the token is the value.
  2336. param.defects.append(errors.InvalidHeaderDefect(
  2337. "Missing required charset/lang delimiters"))
  2338. appendto.append(token)
  2339. if remainder is None:
  2340. return param, value
  2341. else:
  2342. if token is not None:
  2343. for t in token:
  2344. if t.token_type == 'extended-attrtext':
  2345. break
  2346. t.token_type == 'attrtext'
  2347. appendto.append(t)
  2348. param.charset = t.value
  2349. if value[0] != "'":
  2350. raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
  2351. "delimiter, but found {!r}".format(value))
  2352. appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
  2353. value = value[1:]
  2354. if value and value[0] != "'":
  2355. token, value = get_attrtext(value)
  2356. appendto.append(token)
  2357. param.lang = token.value
  2358. if not value or value[0] != "'":
  2359. raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
  2360. "delimiter, but found {}".format(value))
  2361. appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
  2362. value = value[1:]
  2363. if remainder is not None:
  2364. # Treat the rest of value as bare quoted string content.
  2365. v = Value()
  2366. while value:
  2367. if value[0] in WSP:
  2368. token, value = get_fws(value)
  2369. else:
  2370. token, value = get_qcontent(value)
  2371. v.append(token)
  2372. token = v
  2373. else:
  2374. token, value = get_value(value)
  2375. appendto.append(token)
  2376. if remainder is not None:
  2377. assert not value, value
  2378. value = remainder
  2379. return param, value
  2380. def parse_mime_parameters(value):
  2381. """ parameter *( ";" parameter )
  2382. That BNF is meant to indicate this routine should only be called after
  2383. finding and handling the leading ';'. There is no corresponding rule in
  2384. the formal RFC grammar, but it is more convenient for us for the set of
  2385. parameters to be treated as its own TokenList.
  2386. This is 'parse' routine because it consumes the reminaing value, but it
  2387. would never be called to parse a full header. Instead it is called to
  2388. parse everything after the non-parameter value of a specific MIME header.
  2389. """
  2390. mime_parameters = MimeParameters()
  2391. while value:
  2392. try:
  2393. token, value = get_parameter(value)
  2394. mime_parameters.append(token)
  2395. except errors.HeaderParseError as err:
  2396. leader = None
  2397. if value[0] in CFWS_LEADER:
  2398. leader, value = get_cfws(value)
  2399. if not value:
  2400. mime_parameters.append(leader)
  2401. return mime_parameters
  2402. if value[0] == ';':
  2403. if leader is not None:
  2404. mime_parameters.append(leader)
  2405. mime_parameters.defects.append(errors.InvalidHeaderDefect(
  2406. "parameter entry with no content"))
  2407. else:
  2408. token, value = get_invalid_parameter(value)
  2409. if leader:
  2410. token[:0] = [leader]
  2411. mime_parameters.append(token)
  2412. mime_parameters.defects.append(errors.InvalidHeaderDefect(
  2413. "invalid parameter {!r}".format(token)))
  2414. if value and value[0] != ';':
  2415. # Junk after the otherwise valid parameter. Mark it as
  2416. # invalid, but it will have a value.
  2417. param = mime_parameters[-1]
  2418. param.token_type = 'invalid-parameter'
  2419. token, value = get_invalid_parameter(value)
  2420. param.extend(token)
  2421. mime_parameters.defects.append(errors.InvalidHeaderDefect(
  2422. "parameter with invalid trailing text {!r}".format(token)))
  2423. if value:
  2424. # Must be a ';' at this point.
  2425. mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
  2426. value = value[1:]
  2427. return mime_parameters
  2428. def _find_mime_parameters(tokenlist, value):
  2429. """Do our best to find the parameters in an invalid MIME header
  2430. """
  2431. while value and value[0] != ';':
  2432. if value[0] in PHRASE_ENDS:
  2433. tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
  2434. value = value[1:]
  2435. else:
  2436. token, value = get_phrase(value)
  2437. tokenlist.append(token)
  2438. if not value:
  2439. return
  2440. tokenlist.append(ValueTerminal(';', 'parameter-separator'))
  2441. tokenlist.append(parse_mime_parameters(value[1:]))
  2442. def parse_content_type_header(value):
  2443. """ maintype "/" subtype *( ";" parameter )
  2444. The maintype and substype are tokens. Theoretically they could
  2445. be checked against the official IANA list + x-token, but we
  2446. don't do that.
  2447. """
  2448. ctype = ContentType()
  2449. recover = False
  2450. if not value:
  2451. ctype.defects.append(errors.HeaderMissingRequiredValue(
  2452. "Missing content type specification"))
  2453. return ctype
  2454. try:
  2455. token, value = get_token(value)
  2456. except errors.HeaderParseError:
  2457. ctype.defects.append(errors.InvalidHeaderDefect(
  2458. "Expected content maintype but found {!r}".format(value)))
  2459. _find_mime_parameters(ctype, value)
  2460. return ctype
  2461. ctype.append(token)
  2462. # XXX: If we really want to follow the formal grammer we should make
  2463. # mantype and subtype specialized TokenLists here. Probably not worth it.
  2464. if not value or value[0] != '/':
  2465. ctype.defects.append(errors.InvalidHeaderDefect(
  2466. "Invalid content type"))
  2467. if value:
  2468. _find_mime_parameters(ctype, value)
  2469. return ctype
  2470. ctype.maintype = token.value.strip().lower()
  2471. ctype.append(ValueTerminal('/', 'content-type-separator'))
  2472. value = value[1:]
  2473. try:
  2474. token, value = get_token(value)
  2475. except errors.HeaderParseError:
  2476. ctype.defects.append(errors.InvalidHeaderDefect(
  2477. "Expected content subtype but found {!r}".format(value)))
  2478. _find_mime_parameters(ctype, value)
  2479. return ctype
  2480. ctype.append(token)
  2481. ctype.subtype = token.value.strip().lower()
  2482. if not value:
  2483. return ctype
  2484. if value[0] != ';':
  2485. ctype.defects.append(errors.InvalidHeaderDefect(
  2486. "Only parameters are valid after content type, but "
  2487. "found {!r}".format(value)))
  2488. # The RFC requires that a syntactically invalid content-type be treated
  2489. # as text/plain. Perhaps we should postel this, but we should probably
  2490. # only do that if we were checking the subtype value against IANA.
  2491. del ctype.maintype, ctype.subtype
  2492. _find_mime_parameters(ctype, value)
  2493. return ctype
  2494. ctype.append(ValueTerminal(';', 'parameter-separator'))
  2495. ctype.append(parse_mime_parameters(value[1:]))
  2496. return ctype
  2497. def parse_content_disposition_header(value):
  2498. """ disposition-type *( ";" parameter )
  2499. """
  2500. disp_header = ContentDisposition()
  2501. if not value:
  2502. disp_header.defects.append(errors.HeaderMissingRequiredValue(
  2503. "Missing content disposition"))
  2504. return disp_header
  2505. try:
  2506. token, value = get_token(value)
  2507. except errors.HeaderParseError:
  2508. ctype.defects.append(errors.InvalidHeaderDefect(
  2509. "Expected content disposition but found {!r}".format(value)))
  2510. _find_mime_parameters(disp_header, value)
  2511. return disp_header
  2512. disp_header.append(token)
  2513. disp_header.content_disposition = token.value.strip().lower()
  2514. if not value:
  2515. return disp_header
  2516. if value[0] != ';':
  2517. disp_header.defects.append(errors.InvalidHeaderDefect(
  2518. "Only parameters are valid after content disposition, but "
  2519. "found {!r}".format(value)))
  2520. _find_mime_parameters(disp_header, value)
  2521. return disp_header
  2522. disp_header.append(ValueTerminal(';', 'parameter-separator'))
  2523. disp_header.append(parse_mime_parameters(value[1:]))
  2524. return disp_header
  2525. def parse_content_transfer_encoding_header(value):
  2526. """ mechanism
  2527. """
  2528. # We should probably validate the values, since the list is fixed.
  2529. cte_header = ContentTransferEncoding()
  2530. if not value:
  2531. cte_header.defects.append(errors.HeaderMissingRequiredValue(
  2532. "Missing content transfer encoding"))
  2533. return cte_header
  2534. try:
  2535. token, value = get_token(value)
  2536. except errors.HeaderParseError:
  2537. ctype.defects.append(errors.InvalidHeaderDefect(
  2538. "Expected content trnasfer encoding but found {!r}".format(value)))
  2539. else:
  2540. cte_header.append(token)
  2541. cte_header.cte = token.value.strip().lower()
  2542. if not value:
  2543. return cte_header
  2544. while value:
  2545. cte_header.defects.append(errors.InvalidHeaderDefect(
  2546. "Extra text after content transfer encoding"))
  2547. if value[0] in PHRASE_ENDS:
  2548. cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
  2549. value = value[1:]
  2550. else:
  2551. token, value = get_phrase(value)
  2552. cte_header.append(token)
  2553. return cte_header