We have extracted the following 50 code examples from open-source Python projects to illustrate how to use sets.Set().
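For context, sets.Set comes from the sets module introduced in Python 2.3; it was superseded by the built-in set type in Python 2.4, deprecated in 2.6, and removed in Python 3. A minimal sketch of the basic API:

# Python 2 only: the sets module is deprecated and was removed in Python 3.
from sets import Set

s = Set([1, 2, 3])
t = Set([3, 4, 5])
s.add(4)             # in-place insertion
print(s & t)         # intersection: Set([3, 4])
print(s | t)         # union: Set([1, 2, 3, 4, 5])

# Modern equivalent with the built-in type, which most examples below use:
u = set([1, 2, 3])

Note that many of the examples that follow use the built-in set() rather than sets.Set(); the two share essentially the same interface.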
def removeduppaths():
    """ Remove duplicate entries from sys.path along with making them
    absolute"""
    # This ensures that the initial path provided by the interpreter contains
    # only absolute pathnames, even if we're running from the build directory.
    L = []
    known_paths = set()
    for dir in sys.path:
        # Filter out duplicate paths (on case-insensitive file systems also
        # if they only differ in case); turn relative paths into absolute
        # paths.
        dir, dircase = makepath(dir)
        if not dircase in known_paths:
            L.append(dir)
            known_paths.add(dircase)
    sys.path[:] = L
    return known_paths

# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python.  See http://www.python.org/sf/586680
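removeduppaths() relies on the makepath() helper defined near it in site.py; for context, that helper looks roughly like this in CPython 2.x:

import os

def makepath(*paths):
    # Join and absolutize the path; the case-normalized second value is what
    # removeduppaths() uses to detect duplicates on case-insensitive
    # file systems.
    dir = os.path.join(*paths)
    dir = os.path.abspath(dir)
    return dir, os.path.normcase(dir)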
def setencoding():
    """Set the string encoding used by the Unicode implementation.  The
    default is 'ascii', but if you're willing to experiment, you can
    change this."""
    encoding = "ascii" # Default value set by _PyUnicode_Init()
    if 0:
        # Enable to support locale aware default string encodings.
        import locale
        loc = locale.getdefaultlocale()
        if loc[1]:
            encoding = loc[1]
    if 0:
        # Enable to switch off string to Unicode coercion and implicit
        # Unicode to string conversion.
        encoding = "undefined"
    if encoding != "ascii":
        # On Non-Unicode builds this will raise an AttributeError...
        sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def endData(self, containerClass=NavigableString):
    if self.currentData:
        currentData = u''.join(self.currentData)
        if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
            not set([tag.name for tag in self.tagStack]).intersection(
                self.PRESERVE_WHITESPACE_TAGS)):
            if '\n' in currentData:
                currentData = '\n'
            else:
                currentData = ' '
        self.currentData = []
        if self.parseOnlyThese and len(self.tagStack) <= 1 and \
               (not self.parseOnlyThese.text or \
                not self.parseOnlyThese.search(currentData)):
            return
        o = containerClass(currentData)
        o.setup(self.currentTag, self.previous)
        if self.previous:
            self.previous.next = o
        self.previous = o
        self.currentTag.contents.append(o)
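The all-whitespace test above hinges on two class constants; a minimal sketch of the translate() trick, with constant values assumed from the BeautifulSoup 3 source:

# Assumed from BeautifulSoup 3: map ASCII whitespace code points to None so
# that unicode.translate() deletes them; an empty result means the text was
# pure whitespace.
STRIP_ASCII_SPACES = {9: None, 10: None, 12: None, 13: None, 32: None}
PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

print(u' \t\n '.translate(STRIP_ASCII_SPACES) == u'')   # True
print(u'text'.translate(STRIP_ASCII_SPACES) == u'')     # False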
def map_regions(self, regions):
    """Draw the key regions onto surface regions.

    Regions must have at least 3 byte pixels. Each pixel of the keyboard
    rectangle is set to the color (note, velocity, 0). The regions surface
    must be at least as large as (0, 0, self.rect.left, self.rect.bottom)

    """

    # First draw the white key regions. Then add the overlapping
    # black key regions.
    #
    cutoff = self.black_key_height
    black_keys = []
    for note in range(self._start_note, self._end_note + 1):
        key = self._keys[note]
        if key.is_white:
            fill_region(regions, note, key.rect, cutoff)
        else:
            black_keys.append((note, key))
    for note, key in black_keys:
        fill_region(regions, note, key.rect, cutoff)
def parseFragment(self, stream, container="div", encoding=None,
                  parseMeta=False, useChardet=True):
    """Parse a HTML fragment into a well-formed tree fragment

    container - name of the element we're setting the innerHTML property
    if set to None, default to 'div'

    stream - a filelike object or string containing the HTML to be parsed

    The optional encoding parameter must be a string that indicates
    the encoding.  If specified, that encoding will be used,
    regardless of any BOM or later declaration (such as in a meta
    element)
    """
    self._parse(stream, True, container=container, encoding=encoding)
    return self.tree.getFragment()
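For reference, a hypothetical call through html5lib's public API, which wraps a method like this (the exact keyword set varies between html5lib versions):

import html5lib

# Parse an innerHTML-style fragment; container="div" mirrors the default above.
parser = html5lib.HTMLParser()
fragment = parser.parseFragment("<b>bold</b> and plain text", container="div")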
def __init__(self, name):
    """Node representing an item in the tree.
    name - The tag name associated with the node
    parent - The parent of the current node (or None for the document node)
    value - The value of the current node (applies to text nodes and
    comments)
    attributes - a dict holding name, value pairs for attributes of the node
    childNodes - a list of child nodes of the current node. This must
    include all elements but not necessarily other node types
    _flags - A list of miscellaneous flags that can be set on the node
    """
    self.name = name
    self.parent = None
    self.value = None
    self.attributes = {}
    self.childNodes = []
    self._flags = []
def render(self, context):
    key = self.queryset_var.var
    value = self.queryset_var.resolve(context)
    if isinstance(self.paginate_by, int):
        paginate_by = self.paginate_by
    else:
        paginate_by = self.paginate_by.resolve(context)
    paginator = Paginator(value, paginate_by, self.orphans)
    try:
        page_obj = paginator.page(context['request'].page)
    except InvalidPage:
        if INVALID_PAGE_RAISES_404:
            raise Http404('Invalid page requested.  If DEBUG were set to ' +
                          'False, an HTTP 404 page would have been shown instead.')
        context[key] = []
        context['invalid_page'] = True
        return ''
    if self.context_var is not None:
        context[self.context_var] = page_obj.object_list
    else:
        context[key] = page_obj.object_list
    context['paginator'] = paginator
    context['page_obj'] = page_obj
    return ''
def add_parser(self, name, **kwargs):
    # set prog from the existing prefix
    if kwargs.get('prog') is None:
        kwargs['prog'] = '%s %s' % (self._prog_prefix, name)

    aliases = kwargs.pop('aliases', ())

    # create a pseudo-action to hold the choice help
    if 'help' in kwargs:
        help = kwargs.pop('help')
        choice_action = self._ChoicesPseudoAction(name, aliases, help)
        self._choices_actions.append(choice_action)

    # create the parser and add it to the map
    parser = self._parser_class(**kwargs)
    self._name_parser_map[name] = parser

    # make parser available under aliases also
    for alias in aliases:
        self._name_parser_map[alias] = parser

    return parser
def _add_action(self, action):
    # resolve any conflicts
    self._check_conflict(action)

    # add to actions list
    self._actions.append(action)
    action.container = self

    # index the action by any option strings it has
    for option_string in action.option_strings:
        self._option_string_actions[option_string] = action

    # set the flag if any option strings look like negative numbers
    for option_string in action.option_strings:
        if self._negative_number_matcher.match(option_string):
            if not self._has_negative_number_optionals:
                self._has_negative_number_optionals.append(True)

    # return the created action
    return action
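Both methods above are argparse internals: add_argument() ultimately funnels into _add_action(), and add_subparsers() returns the object whose add_parser() is shown. A minimal example of the public API that drives them:

import argparse

parser = argparse.ArgumentParser(prog='tool')
subparsers = parser.add_subparsers(dest='command')

# add_parser() builds a child parser and registers it in _name_parser_map.
run = subparsers.add_parser('run', help='run a task')

# add_argument() creates an Action and registers it via _add_action().
run.add_argument('--fast', action='store_true')

args = parser.parse_args(['run', '--fast'])
print(args.command, args.fast)   # run True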
def test_static(self):
    self.alias.dynamic = False
    self.alias.compile()

    self.alias.decodable_properties = set(['foo', 'bar'])

    attrs = {
        'foo': 'foo',
        'bar': 'bar',
        'baz': 'baz',
        'gak': 'gak',
    }

    ret = self.alias.getDecodableAttributes(self.obj, attrs)

    self.assertEqual(ret, {'foo': 'foo', 'bar': 'bar'})
def _add_library(self, name, sources, install_dir, build_info):
    """Common implementation for add_library and add_installed_library. Do
    not use directly"""
    build_info = copy.copy(build_info)
    name = name #+ '__OF__' + self.name
    build_info['sources'] = sources

    # Sometimes, depends is not set up to an empty list by default, and if
    # depends is not given to add_library, distutils barfs (#1134)
    if not 'depends' in build_info:
        build_info['depends'] = []

    self._fix_paths_dict(build_info)

    # Add to libraries list so that it is built with build_clib
    self.libraries.append((name, build_info))
def __init__(self, sess, param_dict, num_worker, weight_combiner=None,
             port=10080, reusable=False):
    # threading.Thread.__init__(self)
    self._session = sess
    self._port = port
    self._param_dict = param_dict
    self._application = web.Application([(r"/", ParameterServerHandler,
                                          {'server': self})])
    self._version = 0
    self._sync_lock = threading.Lock()
    self._num_worker = num_worker
    self._ended_worker = sets.Set()
    self._http_server = None
    self._reusable = reusable
    if weight_combiner is None:
        self._weight_combiner = MeanWeightCombiner(num_worker)
    else:
        self._weight_combiner = weight_combiner
def get_adapter_dependency_roles(profiles):
    if not profiles:
        return []
    dep_files = find_adapter_files(ADAPTER_MARKER_EXTENSION, profiles)
    all_deps = set()
    for profile_name, dep_file in dep_files.items():
        with open(dep_file, 'r') as f:
            deps = yaml.safe_load(f)
            if not deps:
                deps = {}
            all_deps |= set(deps.get("role-dependencies", {}))
    return list(all_deps)
def crawler(urls, max_urls):
    crawled = Set()
    queued = Set(urls)
    pairs = []
    while urls and len(crawled) < max_urls:
        page = urls.pop(0)
        if is_html(page):
            if page not in crawled:
                try:
                    print(page)
                    links = BeautifulSoup(urllib2.urlopen(page, timeout=5).read(),
                                          parseOnlyThese=SoupStrainer('a'))
                    for link in links:
                        url = domain + link['href']
                        if verify(url) and url not in queued:
                            # print(url)
                            urls.append('http://' + url)
                            # print(urls)
                            queued.add('http://' + url)
                    # print(page)
                    crawled.add(page)
                    # print(crawled)
                except:
                    continue
    return crawled, pairs
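A hypothetical driver for this crawler (domain, is_html, and verify are helpers assumed to be defined elsewhere in the original script):

# Hypothetical usage; the seed URL and limit are illustrative only.
seeds = ['http://example.com/']
crawled, pairs = crawler(seeds, max_urls=100)
print(len(crawled))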