The following 38 code examples, extracted from open-source Python projects, illustrate how to use email.errors.CharsetError().
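Before the extracted examples, here is a minimal sketch (not taken from any of the projects below) of the behaviour they revolve around: email.charset.Charset raises email.errors.CharsetError when the charset name itself cannot be encoded as ASCII.

# Minimal sketch, Python 3: a non-ASCII charset name is rejected at construction time.
from email.charset import Charset
from email import errors

try:
    Charset('asc\xffii')          # '\xff' is not an ASCII character
except errors.CharsetError as e:
    print('bad charset name:', e)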
def __init__(self, input_charset=DEFAULT_CHARSET):
    # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
    # unicode because its .lower() is locale insensitive.  If the argument
    # is already a unicode, we leave it at that, but ensure that the
    # charset is ASCII, as the standard (RFC XXX) requires.
    try:
        if isinstance(input_charset, unicode):
            input_charset.encode('ascii')
        else:
            input_charset = unicode(input_charset, 'ascii')
    except UnicodeError:
        raise errors.CharsetError(input_charset)
    input_charset = input_charset.lower().encode('ascii')
    # Set the input charset after filtering through the aliases and/or codecs
    if not (input_charset in ALIASES or input_charset in CHARSETS):
        try:
            input_charset = codecs.lookup(input_charset).name
        except LookupError:
            pass
    self.input_charset = ALIASES.get(input_charset, input_charset)
    # We can try to guess which encoding and conversion to use by the
    # charset_map dictionary.  Try that first, but let the user override
    # it.
    henc, benc, conv = CHARSETS.get(self.input_charset,
                                    (SHORTEST, BASE64, None))
    if not conv:
        conv = self.input_charset
    # Set the attributes, allowing the arguments to override the default.
    self.header_encoding = henc
    self.body_encoding = benc
    self.output_charset = ALIASES.get(conv, conv)
    # Now set the codecs.  If one isn't defined for input_charset,
    # guess and try a Unicode codec with the same name as input_codec.
    self.input_codec = CODEC_MAP.get(self.input_charset,
                                     self.input_charset)
    self.output_codec = CODEC_MAP.get(self.output_charset,
                                      self.output_charset)
def __init__(self, input_charset=DEFAULT_CHARSET):
    # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
    # unicode because its .lower() is locale insensitive.  If the argument
    # is already a unicode, we leave it at that, but ensure that the
    # charset is ASCII, as the standard (RFC XXX) requires.
    try:
        if isinstance(input_charset, unicode):
            input_charset.encode('ascii')
        else:
            input_charset = unicode(input_charset, 'ascii')
    except UnicodeError:
        raise errors.CharsetError(input_charset)
    input_charset = input_charset.lower()
    # Set the input charset after filtering through the aliases
    self.input_charset = ALIASES.get(input_charset, input_charset)
    # We can try to guess which encoding and conversion to use by the
    # charset_map dictionary.  Try that first, but let the user override
    # it.
    henc, benc, conv = CHARSETS.get(self.input_charset,
                                    (SHORTEST, BASE64, None))
    if not conv:
        conv = self.input_charset
    # Set the attributes, allowing the arguments to override the default.
    self.header_encoding = henc
    self.body_encoding = benc
    self.output_charset = ALIASES.get(conv, conv)
    # Now set the codecs.  If one isn't defined for input_charset,
    # guess and try a Unicode codec with the same name as input_codec.
    self.input_codec = CODEC_MAP.get(self.input_charset,
                                     self.input_charset)
    self.output_codec = CODEC_MAP.get(self.output_charset,
                                      self.output_charset)
def test_unicode_charset_name(self):
    charset = Charset(u'us-ascii')
    self.assertEqual(str(charset), 'us-ascii')
    self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')


# Test multilingual MIME headers.
def __init__(self, input_charset=DEFAULT_CHARSET):
    # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
    # unicode because its .lower() is locale insensitive.  If the argument
    # is already a unicode, we leave it at that, but ensure that the
    # charset is ASCII, as the standard (RFC XXX) requires.
    try:
        if isinstance(input_charset, str):
            input_charset.encode('ascii')
        else:
            input_charset = str(input_charset, 'ascii')
    except UnicodeError:
        raise errors.CharsetError(input_charset)
    input_charset = input_charset.lower()
    # Set the input charset after filtering through the aliases
    self.input_charset = ALIASES.get(input_charset, input_charset)
    # We can try to guess which encoding and conversion to use by the
    # charset_map dictionary.  Try that first, but let the user override
    # it.
    henc, benc, conv = CHARSETS.get(self.input_charset,
                                    (SHORTEST, BASE64, None))
    if not conv:
        conv = self.input_charset
    # Set the attributes, allowing the arguments to override the default.
    self.header_encoding = henc
    self.body_encoding = benc
    self.output_charset = ALIASES.get(conv, conv)
    # Now set the codecs.  If one isn't defined for input_charset,
    # guess and try a Unicode codec with the same name as input_codec.
    self.input_codec = CODEC_MAP.get(self.input_charset,
                                     self.input_charset)
    self.output_codec = CODEC_MAP.get(self.output_charset,
                                      self.output_charset)
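The constructors above normalize the charset name through the ALIASES table and pick default header and body encodings from the CHARSETS map. A small sketch of what that looks like from the caller's side; the attribute values shown come from the standard library's default tables and could differ in other versions:

from email.charset import Charset, QP, BASE64

c = Charset('latin_1')                          # aliased to 'iso-8859-1'
print(c.input_charset)                          # iso-8859-1
print(c.body_encoding == QP)                    # True: iso-8859-1 defaults to quoted-printable
print(Charset('utf-8').body_encoding == BASE64) # True: utf-8 defaults to base64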
def test_unicode_charset_name(self):
    charset = Charset('us-ascii')
    self.assertEqual(str(charset), 'us-ascii')
    self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')


# Test multilingual MIME headers.
def test_unknown_charset(self):
    self._test('=?foobar?q?foo=ACbar?=',
               b'foo\xacbar'.decode('ascii', 'surrogateescape'),
               charset = 'foobar',
               # XXX Should this be a new Defect instead?
               defects = [errors.CharsetError])
def decode(ew):
    """Decode encoded word and return (string, charset, lang, defects) tuple.

    An RFC 2047/2243 encoded word has the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' may be omitted but the other parts may not be.

    This function expects exactly such a string (that is, it does not check the
    syntax and may raise errors if the string is not well formed), and returns
    the encoded_string decoded first from its Content Transfer Encoding and
    then from the resulting bytes into unicode using the specified charset.  If
    the cte-decoded string does not successfully decode using the specified
    character set, a defect is added to the defects list and the unknown octets
    are replaced by the unicode 'unknown' character \\uFDFF.

    The specified charset and language are returned.  The default for language,
    which is rarely if ever encountered, is the empty string.

    """
    _, charset, cte, cte_string, _ = ew.split('?')
    charset, _, lang = charset.partition('*')
    cte = cte.lower()
    # Recover the original bytes and do CTE decoding.
    bstring = cte_string.encode('ascii', 'surrogateescape')
    bstring, defects = _cte_decoders[cte](bstring)
    # Turn the CTE decoded bytes into unicode.
    try:
        string = bstring.decode(charset)
    except UnicodeError:
        defects.append(errors.UndecodableBytesDefect("Encoded word "
            "contains bytes not decodable using {} charset".format(charset)))
        string = bstring.decode(charset, 'surrogateescape')
    except LookupError:
        string = bstring.decode('ascii', 'surrogateescape')
        if charset.lower() != 'unknown-8bit':
            defects.append(errors.CharsetError("Unknown charset {} "
                "in encoded word; decoded as unknown bytes".format(charset)))
    return string, charset, lang, defects
def decode(ew):
    """Decode encoded word and return (string, charset, lang, defects) tuple.

    An RFC 2047/2243 encoded word has the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' may be omitted but the other parts may not be.

    This function expects exactly such a string (that is, it does not check the
    syntax and may raise errors if the string is not well formed), and returns
    the encoded_string decoded first from its Content Transfer Encoding and
    then from the resulting bytes into unicode using the specified charset.  If
    the cte-decoded string does not successfully decode using the specified
    character set, a defect is added to the defects list and the unknown octets
    are replaced by the unicode 'unknown' character \uFDFF.

    The specified charset and language are returned.  The default for language,
    which is rarely if ever encountered, is the empty string.

    """
    _, charset, cte, cte_string, _ = ew.split('?')
    charset, _, lang = charset.partition('*')
    cte = cte.lower()
    # Recover the original bytes and do CTE decoding.
    bstring = cte_string.encode('ascii', 'surrogateescape')
    bstring, defects = _cte_decoders[cte](bstring)
    # Turn the CTE decoded bytes into unicode.
    try:
        string = bstring.decode(charset)
    except UnicodeError:
        defects.append(errors.UndecodableBytesDefect("Encoded word "
            "contains bytes not decodable using {} charset".format(charset)))
        string = bstring.decode(charset, 'surrogateescape')
    except LookupError:
        string = bstring.decode('ascii', 'surrogateescape')
        if charset.lower() != 'unknown-8bit':
            defects.append(errors.CharsetError("Unknown charset {} "
                "in encoded word; decoded as unknown bytes".format(charset)))
    return string, charset, lang, defects
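The decode() function above records CharsetError as a parsing defect instead of raising it. To see this from the caller's side, one can call it directly; note that email._encoded_words is a private module, so this is only an illustrative sketch of the behaviour the test_unknown_charset example exercises, not a supported public API:

from email import _encoded_words, errors

# '=?foobar?q?foo=ACbar?=' names a charset Python has no codec for.
string, charset, lang, defects = _encoded_words.decode('=?foobar?q?foo=ACbar?=')
print(charset)                                  # foobar
print([type(d).__name__ for d in defects])     # ['CharsetError']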