The following code examples, extracted from open-source Python projects, illustrate how to use gettext.find().
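As a reference point before the examples: the standard-library signature is gettext.find(domain, localedir=None, languages=None, all=False). It searches <localedir>/<language>/LC_MESSAGES/<domain>.mo for each candidate language and returns the path of the first matching catalog, or None. A minimal sketch (the 'myapp' domain and the locale directory below are placeholders, not from any of the projects quoted here):

import gettext

# Search /usr/share/locale/<lang>/LC_MESSAGES/myapp.mo for each candidate
# language, in order of preference. 'myapp' and the path are placeholders.
mo_path = gettext.find('myapp', localedir='/usr/share/locale',
                       languages=['de_DE', 'de', 'en'])
if mo_path is None:
    print('no catalog found; untranslated strings will be used')
else:
    with open(mo_path, 'rb') as fp:
        catalog = gettext.GNUTranslations(fp)
    print(catalog.gettext('Hello, world!'))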
def load(cls, dirname=None, locales=None, domain=None):
    """Load translations from the given directory.

    :param dirname: the directory containing the ``MO`` files
    :param locales: the list of locales in order of preference (items in
                    this list can be either `Locale` objects or locale
                    strings)
    :param domain: the message domain (default: 'messages')
    """
    if locales is not None:
        if not isinstance(locales, (list, tuple)):
            locales = [locales]
        locales = [str(locale) for locale in locales]
    if not domain:
        domain = cls.DEFAULT_DOMAIN
    filename = gettext.find(domain, dirname, locales)
    if not filename:
        return NullTranslations()
    with open(filename, 'rb') as fp:
        return cls(fp=fp, domain=domain)
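A usage sketch for the classmethod above (in Babel it lives on babel.support.Translations; the directory and locales here are made up for illustration):

# Hypothetical call: prefer German catalogs for the default 'messages'
# domain under ./translations; a NullTranslations instance is returned
# when gettext.find() locates no MO file.
catalog = Translations.load(dirname='translations', locales=['de_DE', 'de'])
print(catalog.gettext('Hello'))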
def to_locale(language, to_lower=False):
    """
    Turns a language name (en-us) into a locale name (en_US). If 'to_lower'
    is True, the last component is lower-cased (en_us).
    """
    p = language.find('-')
    if p >= 0:
        if to_lower:
            return language[:p].lower() + '_' + language[p + 1:].lower()
        else:
            # Get correct locale for sr-latn
            if len(language[p + 1:]) > 2:
                return (language[:p].lower() + '_' + language[p + 1].upper() +
                        language[p + 2:].lower())
            return language[:p].lower() + '_' + language[p + 1:].upper()
    else:
        return language.lower()
def check_for_language(lang_code):
    """
    Checks whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available.

    lru_cache should have a maxsize to prevent from memory exhaustion attacks,
    as the provided language codes are taken from the HTTP request. See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
    """
    # First, a quick check to make sure lang_code is well-formed (#21458)
    if lang_code is None or not language_code_re.search(lang_code):
        return False
    for path in all_locale_paths():
        if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
            return True
    return False
def initGetText(domain, install=False, fallback=True):
    locale_paths = [
        Path(__file__).parent / ".." / "locale",
        Path(sys.prefix) / "share" / "locale",
    ]
    locale_dir, translation = None, None
    for locale_dir in [d for d in locale_paths if d.exists()]:
        if gettext.find(domain, str(locale_dir)):
            log.debug("Loading message catalogs from {}".format(locale_dir))
            translation = gettext.translation(domain, str(locale_dir))
            break

    if translation is None:
        # This will either throw FileNotFoundError (fallback=False) or
        # return a gettext.NullTranslations instance.
        translation = gettext.translation(domain, str(locale_dir),
                                          fallback=fallback)

    assert translation

    if install:
        gettext.install(domain, str(locale_dir), names=["ngettext"])

    return translation
def to_locale(language, to_lower=False):
    """
    Turns a language name (en-us) into a locale name (en_US). If 'to_lower'
    is True, the last component is lower-cased (en_us).
    """
    p = language.find('-')
    if p >= 0:
        if to_lower:
            return language[:p].lower() + '_' + language[p+1:].lower()
        else:
            # Get correct locale for sr-latn
            if len(language[p+1:]) > 2:
                return (language[:p].lower() + '_' + language[p+1].upper() +
                        language[p+2:].lower())
            return language[:p].lower() + '_' + language[p+1:].upper()
    else:
        return language.lower()
def do_translate(message, translation_function):
    """
    Translates 'message' using the given 'translation_function' name -- which
    will be either gettext or ugettext. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
    global _default, _active
    t = _active.get(currentThread(), None)
    if t is not None:
        result = getattr(t, translation_function)(eol_message)
    else:
        if _default is None:
            from google.appengine._internal.django.conf import settings
            _default = translation(settings.LANGUAGE_CODE)
        result = getattr(_default, translation_function)(eol_message)
    if isinstance(message, SafeData):
        return mark_safe(result)
    return result
def _save_dictionary_parameter(self, dict_param):
    full_msg = self.data
    # look for %(blah) fields in string;
    # ignore %% and deal with the
    # case where % is first character on the line
    keys = re.findall(r'(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg)

    # if we don't find any %(blah) blocks but have a %s
    if not keys and re.findall(r'(?:[^%]|^)%[a-z]', full_msg):
        # apparently the full dictionary is the parameter
        params = copy.deepcopy(dict_param)
    else:
        params = {}
        for key in keys:
            try:
                params[key] = copy.deepcopy(dict_param[key])
            except TypeError:
                # cast uncopyable thing to unicode string
                params[key] = six.text_type(dict_param[key])

    return params
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    p = locale.find('_')
    if p >= 0:
        return locale[:p].lower() + '-' + locale[p + 1:].lower()
    else:
        return locale.lower()
def do_translate(message, translation_function):
    """
    Translates 'message' using the given 'translation_function' name -- which
    will be either gettext or ugettext. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    global _default

    # str() is allowing a bytestring message to remain bytestring on Python 2
    eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))

    if len(eol_message) == 0:
        # Returns an empty value of the corresponding type if an empty message
        # is given, instead of metadata, which is the default gettext behavior.
        result = type(message)("")
    else:
        _default = _default or translation(settings.LANGUAGE_CODE)
        translation_object = getattr(_active, "value", _default)

        result = getattr(translation_object, translation_function)(eol_message)

    if isinstance(message, SafeData):
        return mark_safe(result)

    return result
def _init_secondary_locale(self):
    """
    Init a secondary locale. Secondary locales are used to provide an
    alternate localization to the one used for the UI; for example,
    some reports offer the option to use a different language.
    """
    if not self.localedir:
        LOG.warning("No Localedir provided, unable to find translations")

    # Look up the first (primary) locale instance before using it below.
    _first = self._Locale__first_instance

    if not self.localedomain:
        if _first.localedomain:
            self.localedomain = _first.localedomain
        else:
            self.localedomain = "gramps"

    if not self.lang and _first.lang:
        self.lang = _first.lang

    if not self.language:
        if self.lang:
            trans = self.check_available_translations(self.lang)
            if trans:
                self.language = [trans]

    if not self.language and _first.language:
        self.language = _first.language

    self.calendar = self.collation = self.lang
def _get_translation(self, domain=None, localedir=None, languages=None):
    """
    Get a translation of one of our classes. Doesn't return the
    singleton so that it can be used by get_addon_translation()
    """
    if not domain:
        domain = self.localedomain
    if not languages:
        languages = self.language
    if not localedir:
        localedir = self.localedir

    for lang in languages:
        if gettext.find(domain, localedir, [lang]):
            translator = gettext.translation(domain, localedir, [lang],
                                             class_=GrampsTranslations)
            translator._language = lang
            return translator
        elif lang.startswith("en") or lang.startswith("C"):
            translator = GrampsNullTranslations()
            translator._language = "en"
            return translator

    if not languages or len(languages) == 0:
        LOG.warning("No language provided, using US English")
    else:
        raise ValueError("No usable translations in %s" % ':'.join(languages))
    translator = GrampsNullTranslations()
    translator._language = "en"
    return translator
def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction):
    if not (lang and locale_dirs):
        return []

    domain = find_catalog(docname, compaction)
    files = [gettext.find(domain, path.join(srcdir, dir_), [lang])
             for dir_ in locale_dirs]
    files = [path.relpath(f, srcdir) for f in files if f]
    return files
def _load_unicode_emoji_zwj_sequences(self):
    '''
    Loads emoji property data from emoji-zwj-sequences.txt

    http://unicode.org/Public/emoji/5.0/emoji-zwj-sequences.txt
    '''
    dirnames = (USER_DATADIR, DATADIR)
    basenames = ('emoji-zwj-sequences.txt',)
    (path, open_function) = _find_path_and_open_function(
        dirnames, basenames)
    if not path:
        sys.stderr.write(
            '_load_unicode_emoji_zwj_sequences(): could not find "%s" in "%s"\n'
            % (basenames, dirnames))
        return
    with open_function(path, mode='rt') as unicode_emoji_zwj_sequences_file:
        for line in unicode_emoji_zwj_sequences_file.readlines():
            unicode_version = ''
            pattern = re.compile(
                r'[^;]*;[^;]*;[^;]*#\s*(?P<uversion>[0-9]+\.[0-9]+)\s*'
                + r'\[[0-9]+\]')
            match = pattern.match(line)
            if match and match.group('uversion'):
                unicode_version = match.group('uversion')
            line = re.sub(r'#.*$', '', line).strip()
            if not line:
                continue
            codepoints, property, name = [
                x.strip() for x in line.split(';')[:3]]
            emoji_string = ''
            for codepoint in codepoints.split(' '):
                emoji_string += chr(int(codepoint, 16))
            if emoji_string:
                self._add_to_emoji_dict(
                    (emoji_string, 'en'), 'properties', [property])
                self._add_to_emoji_dict(
                    (emoji_string, 'en'), 'names', [name.lower()])
                if unicode_version:
                    self._add_to_emoji_dict(
                        (emoji_string, 'en'), 'uversion', unicode_version)
def __init__(self):
    self.translators = {
        locale: gettext.GNUTranslations(
            open(gettext.find(
                GETTEXT_DOMAIN, GETTEXT_DIR, languages=[locale]
            ), 'rb')
        )
        for locale in available_locales.keys()
        if locale != 'en_US'  # No translation file for en_US
    }
    self.locale_stack = list()
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    p = locale.find('_')
    if p >= 0:
        return locale[:p].lower() + '-' + locale[p+1:].lower()
    else:
        return locale.lower()
def check_for_language(lang_code):
    """
    Checks whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available. This is only used for language codes from either the cookies
    or session.
    """
    from google.appengine._internal.django.conf import settings
    globalpath = os.path.join(
        os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    if gettext_module.find('django', globalpath, [to_locale(lang_code)]) is not None:
        return True
    else:
        return False
def has_option_desktop(self, key):
    if key in self.STATIC_DATA:
        return True
    key = self._apply_mapping(key)
    return self.appinfo_xml.find(key) is not None
def desktopf(self):
    subelm = self.appinfo_xml.find("id")
    return subelm.text
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)
    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
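A usage sketch for the helper above, assuming the convention it encodes: the MO directory for a domain is read from the <DOMAIN>_LOCALEDIR environment variable (the 'myapp' domain and path are hypothetical):

import os

# Hypothetical setup: myapp's catalogs live under
# /usr/share/locale/<lang>/LC_MESSAGES/myapp.mo
os.environ['MYAPP_LOCALEDIR'] = '/usr/share/locale'
print(get_available_languages('myapp'))  # e.g. ['en_US', 'de', 'fr']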
def get_language_from_request(request, check_path=False):
    """
    Analyzes the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language.

    If check_path is True, the URL path prefix will be checked for a language
    code, otherwise this is skipped for backwards compatibility.
    """
    if check_path:
        lang_code = get_language_from_path(request.path_info)
        if lang_code is not None:
            return lang_code

    supported_lang_codes = get_languages()

    if hasattr(request, 'session'):
        lang_code = request.session.get(LANGUAGE_SESSION_KEY)
        if (lang_code in supported_lang_codes and lang_code is not None
                and check_for_language(lang_code)):
            return lang_code

    lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)

    try:
        return get_supported_language_variant(lang_code)
    except LookupError:
        pass

    accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
    for accept_lang, unused in parse_accept_lang_header(accept):
        if accept_lang == '*':
            break

        if not language_code_re.search(accept_lang):
            continue

        try:
            return get_supported_language_variant(accept_lang)
        except LookupError:
            continue

    try:
        return get_supported_language_variant(settings.LANGUAGE_CODE)
    except LookupError:
        return settings.LANGUAGE_CODE
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    #     https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (loc, alias) in six.iteritems(aliases):
        if loc in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def find_catalog_source_files(locale_dirs, locale, domains=None,
                              gettext_compact=False, charset='utf-8',
                              force_all=False):
    """
    :param list locale_dirs:
        list of path as `['locale_dir1', 'locale_dir2', ...]` to find
        translation catalogs. Each path contains a structure such as
        `<locale>/LC_MESSAGES/domain.po`.
    :param str locale: a language as `'en'`
    :param list domains: list of domain names to get. If empty list or None
        is specified, get all domain names. default is None.
    :param boolean gettext_compact:
        * False: keep domains directory structure (default).
        * True: domains in the sub directory will be merged into 1 file.
    :param boolean force_all:
        Set True if you want to get all catalogs rather than updated
        catalogs. default is False.
    :return: [CatalogInfo(), ...]
    """
    if not locale:
        return []  # locale is not specified

    catalogs = set()
    for locale_dir in locale_dirs:
        if not locale_dir:
            continue  # skip system locale directory

        base_dir = path.join(locale_dir, locale, 'LC_MESSAGES')

        if not path.exists(base_dir):
            continue  # locale path is not found

        for dirpath, dirnames, filenames in walk(base_dir, followlinks=True):
            filenames = [f for f in filenames if f.endswith('.po')]
            for filename in filenames:
                base = path.splitext(filename)[0]
                domain = path.relpath(path.join(dirpath, base), base_dir)
                if gettext_compact and path.sep in domain:
                    domain = path.split(domain)[0]
                domain = domain.replace(path.sep, SEP)
                if domains and domain not in domains:
                    continue
                cat = CatalogInfo(base_dir, domain, charset)
                if force_all or cat.is_outdated():
                    catalogs.add(cat)
    return catalogs
def _load_unicode_data(self):
    '''Loads emoji names from UnicodeData.txt'''
    dirnames = (USER_DATADIR, DATADIR,
                # On Fedora, the “unicode-ucd” package has the
                # UnicodeData.txt file here:
                '/usr/share/unicode/ucd')
    basenames = ('UnicodeData.txt',)
    (path, open_function) = _find_path_and_open_function(
        dirnames, basenames)
    if not path:
        sys.stderr.write(
            '_load_unicode_data(): could not find "%s" in "%s"\n'
            % (basenames, dirnames))
        return
    with open_function(path, mode='rt') as unicode_data_file:
        for line in unicode_data_file.readlines():
            if not line.strip():
                continue
            codepoint_string, name, category = line.split(';')[:3]
            codepoint_integer = int(codepoint_string, 16)
            emoji_string = chr(codepoint_integer)
            if category in ('Cc', 'Co', 'Cs'):
                # Never load control characters (“Cc”), they cause too many
                # problems when trying to display them. Never load the
                # “First” and “Last” characters of private use characters
                # “Co” and surrogates (“Cs”) either as these are completely
                # useless.
                continue
            if (not self._unicode_data_all
                    and not UNICODE_CATEGORIES[category]['valid']
                    and emoji_string not in VALID_CHARACTERS):
                continue
            self._add_to_emoji_dict(
                (emoji_string, 'en'), 'names', [name.lower()])
            self._add_to_emoji_dict(
                (emoji_string, 'en'), 'ucategories', [
                    category,
                    UNICODE_CATEGORIES[category]['major'],
                    UNICODE_CATEGORIES[category]['minor'],
                ])
def _load_unicode_emoji_data(self):
    '''
    Loads emoji property data from emoji-data.txt

    http://unicode.org/Public/emoji/5.0/emoji-data.txt
    '''
    dirnames = (USER_DATADIR, DATADIR)
    basenames = ('emoji-data.txt',)
    (path, open_function) = _find_path_and_open_function(
        dirnames, basenames)
    if not path:
        sys.stderr.write(
            '_load_unicode_emoji_data(): could not find "%s" in "%s"\n'
            % (basenames, dirnames))
        return
    with open_function(path, mode='rt') as unicode_emoji_data_file:
        for line in unicode_emoji_data_file.readlines():
            unicode_version = ''
            pattern = re.compile(
                r'[^;]*;[^;]*#\s*(?P<uversion>[0-9]+\.[0-9]+)\s*'
                + r'\[[0-9]+\]')
            match = pattern.match(line)
            if match and match.group('uversion'):
                unicode_version = match.group('uversion')
            line = re.sub(r'#.*$', '', line).strip()
            if not line:
                continue
            codepoint_string, property = [
                x.strip() for x in line.split(';')[:2]]
            codepoint_range = [
                int(x, 16) for x in codepoint_string.split('..')]
            if len(codepoint_range) == 1:
                codepoint_range.append(codepoint_range[0])
            assert len(codepoint_range) == 2
            for codepoint in range(
                    codepoint_range[0], codepoint_range[1] + 1):
                emoji_string = chr(codepoint)
                self._add_to_emoji_dict(
                    (emoji_string, 'en'), 'properties', [property])
                if unicode_version:
                    self._add_to_emoji_dict(
                        (emoji_string, 'en'), 'uversion', unicode_version)
def _load_unicode_emoji_sequences(self):
    '''
    Loads emoji property data from emoji-sequences.txt

    http://unicode.org/Public/emoji/5.0/emoji-sequences.txt
    '''
    dirnames = (USER_DATADIR, DATADIR)
    basenames = ('emoji-sequences.txt',)
    (path, open_function) = _find_path_and_open_function(
        dirnames, basenames)
    if not path:
        sys.stderr.write(
            '_load_unicode_emoji_sequences(): could not find "%s" in "%s"\n'
            % (basenames, dirnames))
        return
    with open_function(path, mode='rt') as unicode_emoji_sequences_file:
        for line in unicode_emoji_sequences_file.readlines():
            unicode_version = ''
            pattern = re.compile(
                r'[^;]*;[^;]*;[^;]*#\s*(?P<uversion>[0-9]+\.[0-9]+)\s*'
                + r'\[[0-9]+\]')
            match = pattern.match(line)
            if match and match.group('uversion'):
                unicode_version = match.group('uversion')
            line = re.sub(r'#.*$', '', line).strip()
            if not line:
                continue
            codepoints, property, name = [
                x.strip() for x in line.split(';')[:3]]
            if codepoints == '0023 FE0F 20E3' and name == 'keycap:':
                name = 'keycap: #'
            emoji_string = ''
            for codepoint in codepoints.split(' '):
                emoji_string += chr(int(codepoint, 16))
            if emoji_string:
                self._add_to_emoji_dict(
                    (emoji_string, 'en'), 'properties', [property])
                self._add_to_emoji_dict(
                    (emoji_string, 'en'), 'names', [name.lower()])
                if unicode_version:
                    self._add_to_emoji_dict(
                        (emoji_string, 'en'), 'uversion', unicode_version)