我们从 Python 开源项目中提取了以下 50 个代码示例,用于说明如何使用 slugify.slugify()。
def secure_path(path):
    """Return a sanitized, slug-safe version of *path*.

    The directory name, the file stem and the extension are slugified and
    passed through ``secure_filename`` independently; overly long names
    are shortened to a ``head__tail`` form capped well under 200 chars.
    """
    dirname = os.path.dirname(path)
    filename = os.path.basename(path)
    # BUG FIX: split the extension off the basename, not the full path;
    # splitext(path) leaked directory components into file_base.
    file_base, file_ext = os.path.splitext(filename)
    dirname = secure_filename(slugify(dirname, only_ascii=True))
    file_base = secure_filename(slugify(file_base, only_ascii=True)) or 'unnamed'
    file_ext = secure_filename(slugify(file_ext, only_ascii=True))
    if file_ext:
        filename = '.'.join([file_base, file_ext])
    else:
        filename = file_base
    # Keep pathological names bounded: keep head and tail, drop the middle.
    if len(filename) > 200:
        filename = '%s__%s' % (filename[:99], filename[-99:])
    if dirname:
        return os.path.join(dirname, filename)
    return filename
def on_new_website(self):
    """UI handler: create a new website entry with a unique slug.

    Reads the name from the 'new-website-name' field (falling back to
    '_' when empty), derives a slug, de-duplicates it against existing
    websites by appending underscores, then persists the new website and
    refreshes the view.
    """
    self.binder.update()
    name = self.find('new-website-name').value
    self.find('new-website-name').value = ''
    if not name:
        name = '_'
    slug = slugify(name)
    slugs = [x.slug for x in self.manager.config.websites]
    # Ensure uniqueness by suffixing '_' until the slug is unused.
    while slug in slugs:
        slug += '_'
    w = Website.create(name)
    w.slug = slug
    w.owner = self.context.session.identity
    self.manager.config.websites.append(w)
    self.manager.save()
    self.binder.populate()
def before_page_insert(mapper, connection, page):
    """Pre-insert hook for pages: fill in slug and route.

    Derives a slug from the title when none was supplied (lowercased,
    capped at 255 chars), then computes the page route: the root page
    always maps to '/', others append their slug to the base route.
    """
    if not page.slug:
        page.slug = slugify.slugify(page.title, to_lower=True, max_length=255)

    if page.is_root:
        page.route = '/'
    else:
        sep = '' if page.base_route.endswith('/') else '/'
        page.route = page.base_route + sep + page.slug
def new_branch():
    """Interactively create a git branch named ``<type>[-#issue]-<desc>``.

    Prompts for a branch type (default 'feature'), an optional issue id
    and a short description; slugifies the free-text parts, asks for
    confirmation, then runs ``git checkout -b``.

    Raises:
        ValueError: if branch type or short description is empty.
    """
    branch_type = slugify((prompt('Branch type:', default='feature')))
    issue_id = prompt("Issue ID:")
    short_description = slugify(prompt('Short description:'))
    if not branch_type or not short_description:
        # BUG FIX: the adjacent literals previously concatenated to
        # "aremandatory." -- added the missing space.
        raise ValueError('[Branch type] and [Short description] are '
                         'mandatory.')
    if issue_id:
        issue_id = '-#{0}'.format(issue_id)
    branch_name = "{0}{1}-{2}".format(branch_type, issue_id, short_description)
    ru_sure = prompt(
        text='Branch name will be "{0}", Are sure? (y/n)'.format(branch_name),
        default='y'
    )
    # Abort silently on anything but an explicit 'y'.
    if ru_sure != 'y':
        return
    _git('checkout -b "{0}"'.format(branch_name))
def slugify(s, max_length=None):
    """Transform a string to a slug that can be used in a url path.

    This method will first try to do the job with python-slugify if
    present.  Otherwise it will process the string by stripping leading
    and ending spaces, converting unicode chars to ascii, lowering all
    chars and replacing spaces and underscore with hyphen "-".

    :param s: str
    :param max_length: int
    :rtype: str
    """
    s = ustr(s)
    if slugify_lib:
        # There are 2 different libraries; only python-slugify is
        # supported -- an incompatible signature raises TypeError and we
        # fall back to the manual implementation below.
        try:
            return slugify_lib.slugify(s, max_length=max_length)
        except TypeError:
            pass
    uni = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')
    # FIX: raw strings -- '[\W_]' / '[-\s]+' are invalid escape
    # sequences and emit DeprecationWarnings on Python 3.
    slug = re.sub(r'[\W_]', ' ', uni).strip().lower()
    slug = re.sub(r'[-\s]+', '-', slug)
    return slug[:max_length]
def ensure_slug(self, dbsession) -> str:
    """Make sure post has a slug.

    Generate a slug based on the title, but only if the blog post
    doesn't have one.  Tries up to 99 candidates, suffixing
    "-<attempt>" from the second attempt on, and checks each against
    existing Post rows for uniqueness.

    :param dbsession: SQLAlchemy session used for the uniqueness query
    :return: Generated slug as a string.
        NOTE(review): returns None (bare ``return``) when a slug already
        exists, despite the ``-> str`` annotation -- confirm callers
        tolerate this.
    :raises RuntimeError: if no free slug is found within 99 attempts
    """
    assert self.title
    if self.slug:
        return
    for attempt in range(1, 100):
        generated_slug = slugify(self.title)
        if attempt >= 2:
            generated_slug += "-" + str(attempt)
        # Check for existing hit
        if not dbsession.query(Post).filter_by(slug=generated_slug).one_or_none():
            self.slug = generated_slug
            return self.slug
    raise RuntimeError("Could not generate slug for {}".format(self.title))
def crawl(url):
    """Download every same-domain page linked from *url*.

    Each linked page is saved to the working directory as
    '<slugified link text>.html'.  Failures on individual pages are
    skipped so one bad link does not abort the crawl.
    """
    domain = url.split("//www.")[-1].split("/")[0]
    html = requests.get(url).content
    soup = bs4.BeautifulSoup(html, "lxml")
    links = set(soup.findAll('a', href=True))
    for link in links:
        sub_url = link['href']
        page_name = link.string
        if domain not in sub_url:
            continue
        try:
            page = requests.get(sub_url).content
            filename = slugify(page_name).lower() + '.html'
            with open(filename, 'wb') as f:
                f.write(page)
        except (requests.RequestException, OSError, TypeError, ValueError):
            # FIX: best-effort crawl, but no longer a bare `except:` that
            # also swallowed KeyboardInterrupt/SystemExit.  TypeError
            # covers slugify(None) when a link has no text.
            pass
def __init__(self, identifier='', instance_of='association', base_name='Undefined',
             language=Language.ENG, scope='*',
             src_topic_ref='', dest_topic_ref='',
             src_role_spec='related', dest_role_spec='related'):
    """Create an association between two topics.

    The scope is slugified unless it is the universal scope '*'.  When
    both topic refs and both role specs are non-empty, a source and a
    destination Member are created and stored on the association.
    """
    super().__init__(identifier, instance_of, base_name, language)
    self.__scope = scope if scope == '*' else slugify(str(scope))
    self.__members = []
    # Only build members when all four endpoint parameters are supplied.
    if src_topic_ref != '' and src_role_spec != '' and dest_topic_ref != '' and dest_role_spec != '':
        src_member = Member(src_topic_ref, src_role_spec)
        dest_member = Member(dest_topic_ref, dest_role_spec)
        self.__members.append(src_member)
        self.__members.append(dest_member)
def __init__(self, name, value, entity_identifier, identifier='',
             data_type=DataType.STRING, scope='*', language=Language.ENG):
    """Create an attribute attached to an entity.

    The entity identifier and scope are slugified unless they are the
    universal scope '*'; an empty identifier is replaced by a random
    UUID, otherwise it is slugified too.
    """
    if entity_identifier == '*':  # Universal Scope.
        self.__entity_identifier = '*'
    else:
        self.__entity_identifier = slugify(str(entity_identifier))
    self.__identifier = (str(uuid.uuid4()) if identifier == ''
                         else slugify(str(identifier)))
    self.__scope = scope if scope == '*' else slugify(scope)
    self.name = name
    self.data_type = data_type
    self.language = language
    self.value = value
def getTmpPathForName(self, name, extension=None, copyFromCache=False, path_relative_to_tmp=''):
    """Return the tmp path for a file, and a flag indicating if the file exists.

    Will also check in the cache and copy to tmp if copyFromCache==True.
    NOTE(review): uses ``unicode`` -- Python 2 only code.
    """
    unicodeName = unicode(name)
    dir_path = os.path.join(self.getTmpProjectPath(), path_relative_to_tmp)
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
    # '<slug>.<ext>' when an extension is given, the bare slug otherwise.
    slugifiedName = ".".join([slugify(unicodeName), extension]) if extension else slugify(unicodeName)
    tmpPath = os.path.join(dir_path, slugifiedName)
    fileExists = False
    if os.path.isfile(tmpPath):
        # file already exists in tmp path, return path and exists flag
        fileExists = True
    elif copyFromCache:
        # See if the file exists in cache, and copy over to project folder.
        cacheFilePath = os.path.join(self.getCachePath(), slugifiedName)
        if os.path.isfile(cacheFilePath):
            shutil.copy(cacheFilePath, tmpPath)
            fileExists = True
    return (tmpPath, fileExists)
def get_file_name_root(tree, mode, entity, task, software, output_type, name):
    """Build the slugified root (no extension) of an output file name.

    Resolves the file-name template for *mode*/*entity*, substitutes the
    task/software/output-type/name variables, slugifies with '_' as the
    separator, then applies the style configured under
    ``tree[mode]["file_name"]["style"]``.
    """
    template = get_file_name_template(tree, mode, entity)
    resolved = update_variable(template, entity, task, software, output_type, name)
    slugged = slugify(resolved, separator="_")
    style = tree[mode]["file_name"].get("style", "")
    return apply_style(slugged, style)
def add_box(self, basebox, basebox_url=None):
    """Register a basebox in the configuration.

    Fills in default name (slugified basebox id), cpu and ram; skips
    exact duplicates; optionally records the basebox URL; then reloads
    the infrastructure.

    :param basebox: dict that must contain a 'basebox' key
    :param basebox_url: optional URL stored in the config
    :raises ValueError: if the dict has no 'basebox' key
    """
    if 'boxes' not in self.config():
        self.config()['boxes'] = []
    if 'basebox' not in basebox:
        raise ValueError('Invalid basebox dict provided')
    basebox.setdefault('name', slugify(basebox['basebox']))
    basebox.setdefault('cpu', BASEBOX_DEFAULT_CPU)
    basebox.setdefault('ram', BASEBOX_DEFAULT_RAM)
    # Idempotent: a fully-identical basebox entry is not added twice.
    if basebox in self.config()['boxes']:
        return
    if basebox_url:
        self.config()['basebox_url'] = basebox_url
    self.config()['boxes'].append(basebox)
    self._load_infra()
def _validate_python(self, value, state=None):
    """Validate a company name: length-bounded and not already registered.

    Raises ValidationError('toolong') when the value exceeds max_length,
    and ValidationError('already_exists') when a company with the same
    slug is already present in the database.
    """
    if len(value) > self.max_length:
        raise ValidationError('toolong', self)

    name_slug = slugify(value)
    # NoResultFound means the slug is free -- validation succeeds.
    try:
        CompanyAlchemy.get_company(name_slug)
    except NoResultFound:
        return
    # A row exists: the company the user is registering already exists.
    raise ValidationError('already_exists', self)
def _build_company_obj(self, **kwargs):
    """Assemble a CompanyAlchemy instance from submitted form fields.

    The id is the slugified company name; the address is formatted from
    the street plus the JSON-encoded city field and flagged as valid.
    """
    city = json.loads(kwargs['company_city'])

    company = CompanyAlchemy()
    company.id = slugify(kwargs['company_name'])
    company.name = kwargs['company_name']
    company.logo_url = kwargs['company_logo']
    company.url = kwargs['company_url']
    company.description = kwargs['company_description']
    company.technologies = self._parse_technologies(kwargs['company_technologies'])
    company.address = self._format_address(
        kwargs['company_street'], city['name'], city['country'])
    company.address_is_valid = True
    company.email = kwargs['company_email']
    company.phone = kwargs['company_phone']
    return company
def org_top_categories(orgs):
    """Load InCites "top categories" data for each organization and sync
    the resulting RDF into the triple store.

    For every organization, one WOS.InCitesTopCategory node is minted
    per category, labeled "<org> - <category>", carrying the paper count
    and related to both the category URI and the organization URI.

    :param orgs: iterable of organization names
    :return: True on completion
    """
    g = Graph()
    for org_name in orgs:
        #print>>sys.stderr, "Processing", org_name, "top categories"
        org_uri = waan_uri(org_name)
        ln = local_name(org_uri)
        top_cat = load_incites_json_file(org_name, 'categories')
        for item in top_cat:
            cat = item['category']
            category_uri = get_category_uri(cat)
            curi = D['topcategory-'] + ln + slugify(cat)
            g.add((curi, RDF.type, WOS.InCitesTopCategory))
            g.add((curi, RDFS.label, Literal("{} - {}".format(org_name, cat))))
            g.add((curi, WOS.number, Literal(item['count'])))
            g.add((curi, VIVO.relates, category_uri))
            g.add((curi, VIVO.relates, org_uri))
    #print g.serialize(format="turtle")
    ng = "http://localhost/data/incites-top-categories"
    backend.sync_updates(ng, g)
    return True
def add_grant(grant, pub_uri):
    """Create a funder and grant(s) graph for a publication.

    Returns an rdflib Graph linking a WOS.Funder (from the agency name)
    and one WOS.Grant per grant id to *pub_uri*.  Returns the empty
    graph when no agency is present.
    """
    g = Graph()
    if grant.get("agency") is None:
        # BUG FIX: the message had one placeholder but two arguments, so
        # the id list was silently dropped from the log output.
        logger.info("No agency found for {} with ids {}.".format(
            pub_uri, ";".join(grant.get("ids", []))))
        return g
    slug = slugify(grant["agency"])
    uri = D['funder-' + slug]
    g.add((uri, RDF.type, WOS.Funder))
    g.add((uri, RDFS.label, Literal(grant["agency"])))
    for gid in grant["ids"]:
        label = "{} - {}".format(grant["agency"], gid)
        guri = D['grant-'] + slugify(label)
        g.add((guri, RDF.type, WOS.Grant))
        g.add((guri, RDFS.label, Literal(label)))
        g.add((guri, WOS.grantId, Literal(gid)))
        g.add((guri, VIVO.relates, uri))
        g.add((guri, VIVO.relates, pub_uri))
    return g
def get_id_of_slug(slug, request, save_id_in_session):
    """Return the uid of the issue whose slugified title matches *slug*.

    :param slug: slug
    :param request: self.request for a fallback
    :param save_id_in_session: Boolean
    :return: uid of the first matching enabled issue, or the fallback
        from get_issue_id(request) when nothing matches
    """
    logger('IssueHelper', 'get_id_of_slug', 'slug: {}'.format(slug))
    issues = get_not_disabled_issues_as_query().all()
    match = next((i for i in issues if str(slugify(i.title)) == str(slug)), None)
    if match is None:
        return get_issue_id(request)
    if save_id_in_session:
        request.session['issue'] = match.uid
    return match.uid
def __init__(self, login, fullname):
    """Create a user from an intra login.

    'user@realm' logins are split into a slugified username and a realm;
    bare logins get realm 'legacy'.  The username is suffixed with an
    incrementing counter until it is unique in the User table.
    """
    self.uuid = str(uuid.uuid4())
    self.fullname = fullname
    self.intra_uid = login
    if '@' in login:
        self.username = slugify(login.split('@')[0])
        self.realm = login.split('@')[1]
    else:
        self.username = slugify(login)
        self.realm = 'legacy'
    count = 1
    username = self.username
    # De-duplicate: user, user2, user3, ... until no DB row matches.
    while User.query.filter_by(username=self.username).first():
        count += 1
        self.username = f'{username}{str(count)}'
def step_6(self, args):
    # NOTE(review): this method awaits coroutines, so it is presumably
    # declared `async def` in the original source (lost in formatting)
    # -- confirm.
    """Config-wizard step 6: collect blacklisted strings.

    '!restart' restarts the wizard; '!skip' skips the step; otherwise
    each argument is slugified (dropping http/https/www stopwords) and
    stored as the blacklist.  On success the wizard advances to step 7.
    """
    if '!restart' in args:
        return await self.restart()
    if args and '!skip' not in args:
        newargs = []
        for thing in args:
            newargs.append(slugify(thing, stopwords=['https', 'http', 'www'], separator='_'))
        self.server_config_build[5] = newargs
    elif '!skip' in args:
        args = 'nothing since you decided to skip'
    else:
        # Empty input: ask the user to try again.
        return Response('I didn\'t quite catch that! The input I picked up doesn\'t seem to be correct!\nPlease try again!', pm=True)
    self.step = 7
    return Response('Okay, got it. Added {} to the list of black listed strings!\n\nNext up is the action to be taken upon finding a '
                    'blacklisted word or if a person is rate limited over 4 times! \nI\'ll take `kick / ban / mute / nothing` as input for this option!'
                    ' \n\t`example input: mute`'.format(
                        args
                    ), pm=True
                    )
def __init__(self, identifier, instance_of, name='Undefined', description=None,
             location='[0.0, 0.0, 0.0]',  # The x ("width"), y ("height"), z ("depth") coordinates.
             rotation='[0.0, 0.0, 0.0, 0.0]',  # The x, y, z, and w coordinates.
             scale='1.0'):
    """Create a scene object; identifier and instance_of are slugified."""
    self.__identifier = slugify(str(identifier))
    self.__instance_of = slugify(str(instance_of))
    self.name = name
    self.description = description
    self.location = location
    self.rotation = rotation
    self.scale = scale
    # Collections populated later by other methods of the class.
    self.__assets = []
    self.__paths = []
    self.__tags = []
    self.__attributes = []
def generate(count):
    """Yield *count* fake protocol records matching the schema.

    Each record gets a faked three-word title, its slug, GitHub
    source/repo URLs derived from the slug, and generated values for
    every remaining schema field.
    """
    for _ in range(count):
        title = fake.sentence(nb_words=3)
        slug = slugify(title)
        github = 'https://github.com/opentrons/protocols/{}'.format(slug)
        record = {
            'title': title,
            'slug': slug,
            'source-url': github + '/protocol.py',
            'github-url': github,
        }
        for field in schema.keys() - record.keys():
            record[field] = generate_field(field)
        yield record
def get_image_upload_path(app_name, prop_name, instance, current_filename, suffix=''):
    """Build the storage path for an uploaded image.

    The file name is the slugified value of *prop_name* on *instance*
    (capped at 300 chars), plus an optional suffix, a UTC epoch
    timestamp, and the original file extension.

    Returns a path of the form 'images/<app_name>/<filename>'.
    """
    # Seconds since the Unix epoch, timezone-aware.
    timestamp = int(datetime.now(tz=timezone.utc).timestamp())
    filename = '{name}{suffix}_{timestamp}{ext}'.format(
        name=slugify(getattr(instance, prop_name), max_length=300),
        suffix=suffix,
        timestamp=str(timestamp),
        ext=os.path.splitext(current_filename)[1],
    )
    # BUG FIX: the format string contained a literal '(unknown)' instead
    # of the {filename} placeholder, so the computed name was never used.
    return 'images/{app_name}/{filename}'.format(
        app_name=app_name,
        filename=filename,
    )
def create_puppy():
    """POST handler: create a Puppy from form fields 'name' and 'image_url'.

    Responds 400 when either field is missing; otherwise persists the
    puppy and answers 201 with a Location header pointing at it.
    """
    # validate attributes
    name = request.form.get("name")
    if not name:
        return "name required", 400
    image_url = request.form.get("image_url")
    if not image_url:
        return "image_url required", 400

    # create in database
    slug = slugify(name)
    puppy = Puppy(slug=slug, name=name, image_url=image_url)
    db.session.add(puppy)
    db.session.commit()

    # return HTTP response
    resp = jsonify({"message": "created"})
    resp.status_code = 201
    resp.headers["Location"] = url_for("get_puppy", slug=slug)
    return resp
def create_puppy():
    """POST handler: create a Puppy validated through puppy_schema.

    Schema errors produce a 400 JSON response; success persists the
    puppy (slug derived from its name) and answers 201 with a Location
    header.
    """
    puppy, errors = puppy_schema.load(request.form)
    if errors:
        error_resp = jsonify(errors)
        error_resp.status_code = 400
        return error_resp

    puppy.slug = slugify(puppy.name)
    db.session.add(puppy)
    db.session.commit()

    resp = jsonify({"message": "created"})
    resp.status_code = 201
    resp.headers["Location"] = url_for("get_puppy", slug=puppy.slug)
    return resp
def edit_puppy(slug):
    """Handler: update the Puppy identified by *slug*.

    404s when no such puppy exists; schema errors produce a 400 JSON
    response; success re-derives the slug from the (possibly changed)
    name and answers with a Location header.
    """
    puppy = Puppy.query.filter(Puppy.slug == slug).first_or_404()
    puppy, errors = puppy_schema.load(request.form, instance=puppy)
    if errors:
        error_resp = jsonify(errors)
        error_resp.status_code = 400
        return error_resp

    puppy.slug = slugify(puppy.name)
    db.session.add(puppy)
    db.session.commit()

    resp = jsonify({"message": "updated"})
    resp.headers["Location"] = url_for("get_puppy", slug=puppy.slug)
    return resp
def reply_snippet(self, name, content=None, file=None, format="bash", ext="sh"):
    """Reply in-channel with a code-snippet attachment.

    The snippet is titled *name* and filed as '<slug(name)>.<ext>' with
    the given filetype; *file*, when provided, overrides inline
    *content*.
    NOTE(review): `format` and `file` shadow builtins -- kept for API
    compatibility.
    """
    kwargs = {
        'content': content,
        'filetype': format,
        'channels': self.body['channel'],
        'filename': '.'.join([slugify(name), ext]),
        'initial_comment': name,
        'title': slugify(name)
    }
    if file:
        kwargs['file'] = file
    return self.reply(name, attachments=[kwargs])
def read_text(self, path_to_file):
    """Read_text will read text from a file

    Args:
        path_to_file (str): Path to a valid file

    Returns:
        Tuple containing the text of the document and the slugified
        title of it (derived from the basename). Else returns None

    Raises:
        ValueError: when the path is None, empty, or a directory.

    NOTE(review): ``title.decode('utf-8')`` implies Python 2 byte
    strings, and the open() handle is never closed -- confirm before
    porting to Python 3.
    """
    if path_to_file is None:
        raise ValueError('parameter is of type None')
    if len(path_to_file) == 0:
        raise ValueError('Empty path tofile given')
    if os.path.isdir(path_to_file):
        raise ValueError('Path given is to a directory')
    full_text = open(path_to_file).read()
    title = os.path.basename(path_to_file)
    title = slugify.slugify(title.decode('utf-8'), only_ascii=True)
    return full_text, title
def default(self):
    """CLI entry point: fetch a talk page and save its transcript.

    Requires the -rl (resource_location) flag; downloads the page,
    slugifies its <title> for the output filename, and writes the
    itemprop='transcript' content to '<title>.txt'.
    NOTE(review): uses ``urllib2`` -- Python 2 only code.
    """
    if not self.app.pargs.resource_location:
        self.app.log.info("Please provide a resource location using -rl flag")
        return
    #import pdb; pdb.set_trace()
    url = self.app.pargs.resource_location
    transcript = None
    title = None
    with closing(urllib2.urlopen(url)) as sf:
        content = sf.read()
        soup = BeautifulSoup(content)
        title = slugify(soup.title.getText())
        transcript = soup(itemprop='transcript')[0].attrs['content']
    self.app.log.info("Succesfully parsed page")
    file_text = "{0}.txt".format(title.encode('utf-8'))
    with open(file_text, 'w') as transcript_file:
        transcript_file.write(transcript.encode('utf-8'))
    #import pdb; pdb.set_trace()
    self.app.log.info(u"Wrote transcript to: {0}".format(file_text.decode('utf-8')))
def generate_slug(target, value, oldvalue, initiator):
    """Attribute-set listener: refresh target.slug from the new value.

    Acts only when the new value is truthy and either no slug exists yet
    or the value actually changed.
    """
    if not value:
        return
    if target.slug and value == oldvalue:
        return
    target.slug = slugify(value)
def sanitize_title(title):
    """Generate the filename of the song to be downloaded.

    Spaces and slashes become underscores; slugify then strips any other
    special characters while preserving '-_()[]{}' and original case.
    """
    underscored = title.replace(' ', '_').replace('/', '_')
    return slugify(underscored, ok='-_()[]{}', lower=False)
def write_playlist(username, playlist_id):
    """Dump the named Spotify playlist's tracks to '<slug(name)>.txt'."""
    results = spotify.user_playlist(username, playlist_id,
                                    fields='tracks,next,name')
    tracks = results['tracks']
    text_file = u'{0}.txt'.format(slugify(results['name'], ok='-_()[]{}'))
    log.info(u'Writing {0} tracks to {1}'.format(tracks['total'], text_file))
    write_tracks(text_file, tracks)
def write_album(album):
    """Dump an album's tracks to '<slug(album name)>.txt'."""
    album_tracks = spotify.album_tracks(album['id'])
    text_file = u'{0}.txt'.format(slugify(album['name'], ok='-_()[]{}'))
    log.info(u'writing {0} tracks to {1}'.format(album_tracks['total'], text_file))
    write_tracks(text_file, album_tracks)
def tag(self, tag, defaults=None):
    """Look up a tag by name, creating it with *defaults* if missing.

    Lookup is by slugified text, which enforces tag case insensitivity;
    this method should be used over ``get_or_create``.

    Returns:
        (tag, created) tuple, mirroring get_or_create semantics.
    """
    try:
        existing = self.get(slug=slugify(tag))
    except self.model.DoesNotExist:
        create_kwargs = dict(defaults) if defaults else {}
        return self.create(text=tag, **create_kwargs), True
    return existing, False
def camelcase(s):
    """Slugify *s*, then PascalCase it by title-casing and dropping hyphens."""
    titled = slugify(s).title()
    return titled.replace("-", "")
def _get_slugline(f):
    """Populate f['slugline'] from the headline in place; returns None."""
    f['slugline'] = slugify(f['headline'])
def _update_slugline(s, f):
    """Refresh f['slugline'] when the update carries a new headline.

    :param s: unused, kept for the updater-callback signature
    :param f: field dict mutated in place
    :return: None
    """
    # Idiom fix: membership test on the dict directly, not on .keys().
    if 'headline' in f:
        f['slugline'] = slugify(f['headline'])
    return None
def file_title(self): '''Convert object title to a format for use in the bag filename. Name is slugified, and then truncated to :attr:`title_length` and expanded as necessary to complete the current word. ''' # slugify the object title, then truncate, # then truncate to last word based on - delimiter title = slugify(self.object_title()) # NOTE: this slugify doesn't not lowercase letters; # do we want to add that? # if the title is longer than requested length, truncate # to nearest complete word if len(title) > self.title_length: # truncated title truncated_title = title[:self.title_length] # If truncated title ends with '-', then it already ends with # a complete word. Remove the last - and return. if truncated_title[-1] == '-': return truncated_title[:-1] # use extra title content after length to complete a partial word extra = title[self.title_length:] # title up to end of first word after title length extra_index = extra.find('-') # if no `-` was found, then truncated portion is the end of the # last word in the title. Weturn the whole title. if extra_index == -1: return title # otherwise, truncate to first complete word return title[:self.title_length + extra_index] return title
def parser(word, separator, skip=False, **kwargs):
    """Parse the pipe content.

    Args:
        word (str): The string to transform
        separator (str): The slug separator.
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        assign (str): Attribute to assign parsed content (default: exchangerate)
        stream (dict): The original item

    Returns:
        dict: The item

    Examples:
        >>> from meza.fntools import Objectify
        >>>
        >>> item = {'content': 'hello world'}
        >>> kwargs = {'stream': item}
        >>> parser(item['content'], '-', **kwargs) == 'hello-world'
        True
    """
    if skip:
        # Pass the original stream item through untouched.
        parsed = kwargs['stream']
    else:
        parsed = slugify(word.strip(), separator=separator)
    return parsed
def async_pipe(*args, **kwargs):
    """A processor module that asynchronously slugifies the field of an item.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        assign (str): Attribute to assign parsed content (default: slugify)
        field (str): Item attribute to operate on (default: 'content')

    Returns:
        Deferred: twisted.internet.defer.Deferred item with concatenated content

    Examples:
        >>> from riko.bado import react
        >>> from riko.bado.mock import FakeReactor
        >>>
        >>> def run(reactor):
        ...     callback = lambda x: print(next(x)['slugify'] == 'hello-world')
        ...     d = async_pipe({'content': 'hello world'})
        ...     return d.addCallbacks(callback, logger.error)
        >>>
        >>> try:
        ...     react(run, _reactor=FakeReactor())
        ... except SystemExit:
        ...     pass
        ...
        True
    """
    # Delegates to the shared parser; the async wrapper applied to this
    # function (not visible here) handles the Deferred plumbing.
    return parser(*args, **kwargs)
def pipe(*args, **kwargs):
    """A processor that slugifies the field of an item.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. May contain the key 'separator'.
            separator (str): The slug separator (default: '-')
        assign (str): Attribute to assign parsed content (default: slugify)
        field (str): Item attribute to operate on (default: 'content')

    Yields:
        dict: an item with concatenated content

    Examples:
        >>> next(pipe({'content': 'hello world'}))['slugify'] == 'hello-world'
        True
        >>> slugified = 'hello_world'
        >>> conf = {'separator': '_'}
        >>> item = {'title': 'hello world'}
        >>> kwargs = {'conf': conf, 'field': 'title', 'assign': 'result'}
        >>> next(pipe(item, **kwargs))['result'] == slugified
        True
    """
    # Delegates to the shared parser; the processor wrapper applied to
    # this function (not visible here) handles conf/field/assign.
    return parser(*args, **kwargs)
def before_post_insert(mapper, connection, post):
    """Pre-insert hook for posts.

    Derives a slug from the title (lowercased, capped at 255 chars) when
    the post does not already have one.
    """
    if post.slug:
        return
    post.slug = slugify.slugify(post.title, to_lower=True, max_length=255)
def _slugify(string, entities=True, decimal=True, hexadecimal=True,
             max_length=0, word_boundary=False, separator='-',
             save_order=False, stopwords=()):
    """Jinja-filter wrapper around python-slugify's ``slugify``.

    Fails the Ansible module when the slugify library is missing, maps
    None to '', and sanity-checks the input before slugifying.
    """
    if not HAS_SLUGIFY:
        module.fail_json(msg='slugify required for this module')
    if string is None:
        return ''
    sanitized_string = _string_sanity_check(string)
    # Keyword arguments guard against positional-signature drift between
    # slugify releases (all eight were previously passed positionally);
    # also fixes the 'sanitzed' typo in the local name.
    return slugify(sanitized_string, entities=entities, decimal=decimal,
                   hexadecimal=hexadecimal, max_length=max_length,
                   word_boundary=word_boundary, separator=separator,
                   save_order=save_order, stopwords=stopwords)
def filters(self):
    """Expose the string-utility functions as Jinja2 filters.

    Returns a name-to-callable mapping; note 'slugify' is bound to the
    module-local _slugify wrapper rather than the library function.
    """
    return {
        'camelize': camelize,
        'clean': clean,
        'classify': classify,
        'count': count,
        'dasherize': dasherize,
        'decapitalize': decapitalize,
        'dedent': dedent,
        'ends_with': ends_with,
        'escape_html': escape_html,
        'humanize': humanize,
        'includes': includes,
        'insert': insert,
        'lines': lines,
        'lpad': lpad,
        'ltrim': ltrim,
        'rpad': rpad,
        'rtrim': rtrim,
        'repeat': repeat,
        'slugify': _slugify,
        'splice': splice,
        'starts_with': starts_with,
        'successor': successor,
        'swap_case': swap_case,
        'transliterate': transliterate,
        'underscore': underscore,
        'unescape_html': unescape_html
    }
def __init__(self, author, title, body, description, slug=None, **kwargs):
    """Create an article; the slug defaults to slugify(title)."""
    final_slug = slug if slug else slugify(title)
    db.Model.__init__(self, author=author, title=title,
                      description=description, body=body,
                      slug=final_slug, **kwargs)
def start_crawl_process(process_params):
    """Run a crawl for (spider_class, connector_class) under a named lock.

    The lock name is the slugified spider class name; when another
    process already holds it, the crawl is skipped with a console
    message.
    """
    spider_class, connector_class = process_params
    lock_name = slugify(spider_class.__name__)
    with _get_lock(lock_name) as acquired:
        if not acquired:
            print("Crawl process of \"%s\" already running" % lock_name)
            return
        crawl([spider_class], connector_class)
def timestamp(self):
    """Return the current time (second resolution) as a slug.

    NOTE(review): uses ``unicode`` -- Python 2 only code.
    """
    return slugify(unicode(str(datetime.now().replace(microsecond=0))))
def get_slug(self):
    """Return the lowercased slug of this object's slug_source attribute."""
    source_value = getattr(self, "slug_source")
    return slugify(source_value, to_lower=True)
def construct_mailfilename(outpath_format, mailinfo, outdir, folder, mailid):
    """Expand *outpath_format* placeholders into a mail's output filename.

    Placeholders: #F folder, #Y/#M/#D date, #h/#m/#s time, #S subject
    (slugified, capped at 128 chars), #id mail id.  The date comes from
    the envelope, falling back to INTERNALDATE.

    Returns the expanded filename joined under *outdir*.
    """
    envelope = mailinfo[b'ENVELOPE']
    # get internaldate if envelope has none
    date = envelope.date
    if date is None:
        date = mailinfo[b'INTERNALDATE']
    subject = ''
    try:
        subject = decode_field(envelope.subject or '')
    except UnicodeDecodeError:
        logging.debug('ERROR: Unicode Error in mailid: %d' % mailid)
        logging.debug(str(mailinfo))
    subject = slugify(subject, separator=' ')
    # limit subject to 128 characters
    if len(subject) > 128:
        subject = subject[:125] + '...'
    replacedict = {
        '#F': folder,
        '#Y': '%02d' % date.year,
        '#M': '%02d' % date.month,
        '#D': '%02d' % date.day,
        '#h': '%02d' % date.hour,
        '#m': '%02d' % date.minute,
        '#s': '%02d' % date.second,
        '#S': subject,
        '#id': str(mailid)
    }
    mailfilename = outpath_format
    for key, value in replacedict.items():
        mailfilename = mailfilename.replace(key, value)
    # BUG FIX: join under the outdir PARAMETER; the code previously used
    # the global args.outdir, leaving the parameter dead.
    return os.path.join(outdir, mailfilename)
def testThreadSlugGeneration(self):
    """A newly created Thread gets a slug derived from its title
    (lowercased, capped at 80 chars)."""
    thread = Thread.objects.create(title='Slug Test 1!', topic=self.topic,
                                   op=Post.objects.create())
    slug = slugify(thread.title, to_lower=True, max_length=80)
    self.assertEqual(thread.slug, slug)