The following 40 code examples, extracted from open-source Python projects, illustrate how to use textblob.TextBlob().
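Before diving into the examples, here is a minimal usage sketch of the constructor they all rely on (the sample sentence is ours, not from any of the projects):

from textblob import TextBlob

blob = TextBlob("TextBlob makes text processing surprisingly pleasant.")
print(blob.sentiment.polarity)      # float in [-1.0, 1.0]
print(blob.sentiment.subjectivity)  # float in [0.0, 1.0]
print(blob.words)                   # tokenized WordList
print(blob.sentences)               # list of Sentence objects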
def analyzetweets(self, access_token, access_token_secret, mytweets=False, q=None):
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    sentimentlist = []
    subjectivitylist = []
    number = NUMBER_OF_TWEETS
    tweets = tweepy.Cursor(api.user_timeline).items() if mytweets else tweepy.Cursor(api.search, q=q).items(number)
    for index, tweet in enumerate(tweets):
        analysis = TextBlob(tweet.text).sentiment
        sentimentlist.append(analysis.polarity)
        subjectivitylist.append(analysis.subjectivity)
        self.update_state(state="RUNNING", meta={"current": index + 1, "total": number})
    sentimentavg = float(sum(sentimentlist) / max(len(sentimentlist), 1))
    subjectivityavg = float(sum(subjectivitylist) / max(len(subjectivitylist), 1))
    return {"current": number, "total": number,
            "subjectivityavg": subjectivityavg, "sentimentavg": sentimentavg}
def generate_html(paragraphs, title_text):
    doc = dominate.document(title='Summary: {}'.format(title_text))
    with doc.head:
        style("""\
            body {
                background-color: #F9F8F1;
                color: #2C232A;
                font-family: sans-serif;
                font-size: 1.2em;
            }
        """)
    with doc:
        div(id='header').add(h1(title_text))
        with div():
            attr(cls='body')
            for para in paragraphs:
                tb = TextBlob(para)
                with p():
                    for sentence in tb.sentences:
                        span(sentence, style="color: {}".format(get_polarity_color(sentence.polarity)))
    return doc
def getSentiment(api, key):
    public_tweets = api.search(key)
    AvgSentiment = 0
    noOfTweets = len(public_tweets)
    sum1 = 0
    for tweet in public_tweets:
        text = tweet.text
        cleanedtext = ' '.join([word for word in text.split(' ')
                                if len(word) > 0 and word[0] != '@' and word[0] != '#'
                                and 'http' not in word and word != 'RT'])
        #print(cleanedtext)
        analysis = TextBlob(cleanedtext)
        sentiment = analysis.sentiment.polarity
        sum1 += sentiment
        if sentiment == 0:
            # ignore: not an opinion, just a general statement
            noOfTweets -= 1
    if noOfTweets > 0:
        AvgSentiment = sum1 / noOfTweets
    return AvgSentiment
def runmytfidf(completeComment, topNumber):
    commentList = []
    # preprocess the comments
    for i in range(0, 10):
        commentList.append(tb(preprocessing(completeComment[i])))
    returnList = []
    # obtain the top keywords for all the pages
    for i, page in enumerate(commentList):
        scores = {word: tfidf(word, page, commentList) for word in page.words}
        sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
        topWords = []
        for word, score in sorted_words[:topNumber]:
            topWords.append(word)
        returnList.append(topWords)
    return returnList
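The tfidf, preprocessing, and tb names above come from the surrounding project and are not shown. A minimal sketch of what such a tfidf helper typically looks like, following the classic TextBlob tf-idf tutorial pattern and assuming tb is an alias for textblob.TextBlob:

import math
from textblob import TextBlob as tb

def tf(word, blob):
    # term frequency: how often the word appears in this document
    return blob.words.count(word) / len(blob.words)

def n_containing(word, bloblist):
    # number of documents in the corpus that contain the word
    return sum(1 for blob in bloblist if word in blob.words)

def idf(word, bloblist):
    # inverse document frequency, smoothed to avoid division by zero
    return math.log(len(bloblist) / (1 + n_containing(word, bloblist)))

def tfidf(word, blob, bloblist):
    return tf(word, blob) * idf(word, bloblist)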
def read_line_eos_noums(self, path):
    """
    Generator. Similar to the function read_line_eos from the
    text_mani module. The only difference here is that we keep
    track of all the nouns.

    :type path: str
    """
    for line in open(path):
        if len(list(self.all_noums)) <= self.max_noums:
            blob = TextBlob(line)
            noums = set(blob.noun_phrases)
            self.all_noums = self.all_noums.union(noums)
        for word in line.split():
            yield word
        yield '<eos>'
def get_polarity(text):
    """Get the degree of polarity of text.

    Parameters
    ----------
    text : str
        The text to analyze.

    Returns
    -------
    float
        The polarity of the text.
    """
    cleaned_text = clean_text(text)
    analysis = TextBlob(cleaned_text)
    return analysis.sentiment.polarity
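clean_text is defined elsewhere in that project. A minimal sketch of a typical cleaner for this kind of sentiment pipeline (the regex patterns here are our assumptions, not the original implementation):

import re

def clean_text(text):
    # strip URLs, @mentions, and extra whitespace before analysis
    text = re.sub(r'https?://\S+', '', text)
    text = re.sub(r'@\w+', '', text)
    return ' '.join(text.split())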
def findSentiment(keywords):
    k_aux = {}
    for k in keywords:
        blob = TextBlob(k)
        k_aux[k] = {}
        if blob.sentiment.polarity < 0:
            k_aux[k]['word'] = 'negative'
        elif blob.sentiment.polarity > 0:
            k_aux[k]['word'] = 'positive'
        else:
            k_aux[k]['word'] = 'neutral'
        k_aux[k]['sentiment'] = blob.sentiment.polarity
        k_aux[k]['subjectivity'] = blob.sentiment.subjectivity
    keywords = k_aux
    return keywords
def get_toot_sentiment(toot):
    '''
    Utility function to classify sentiment of passed toots
    using textblob's sentiment method
    '''
    # create TextBlob object of passed toot text
    analysis = TextBlob(toot)
    # set sentiment
    if analysis.sentiment.polarity > 0:
        return 'positive'
    elif analysis.sentiment.polarity == 0:
        return 'neutral'
    else:
        return 'negative'
def run(self):
    while 1:
        msg = self.queue_in.get()  # get() is blocking
        chat_id = msg.get_chat_id()
        if re.search(r'^(?:/|!)translate ', msg.get_text().lower()) and len(msg.get_text().split(" ")) >= 2:
            foreign = msg.get_text().split(" ", 1)[1]
            try:
                b = textblob.TextBlob(foreign)
                reply = "{}".format(b.translate(to="de"))
            except textblob.exceptions.NotTranslated:
                reply = "Error while translating. (Are there smileys in some words?)"
            self.bot.sendMessage(chat_id, reply)
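Note: TextBlob.translate() worked by calling the Google Translate web endpoint. It was deprecated in textblob 0.16 and removed in later releases, so this example only runs against older versions of the library; for current code, the textblob documentation recommends using an official translation API or a dedicated translation package instead.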
def fix_typos(self):
    self.user_input = TextBlob(self.user_input.lower()).tags
    # Fix lazy user typos, or slang
    words = list()
    for i in self.user_input:
        words.append(i[0])
    for part in range(len(words)):
        if words[part] in slang_typo_dict.keys():
            words[part] = slang_typo_dict[words[part]]
    self.user_input = ' '.join(words)
    return False  # Returns False to move on to help_check
def check_phrase_similarity(self):
    self.user_input = TextBlob(self.user_input.lower()).tags
    self.input_len = len(self.user_input)
    for phrase_type in PHRASE_TYPES:
        for phrase in getattr(keywords, phrase_type):
            score = float()
            for word in self.user_input:
                for n in phrase:
                    if word and n not in unimportant_words:
                        score += liquidmetal.score(n, word[0]) / self.input_len
            if score >= 0.7:  # could be increased/decreased through testing to find a more optimal value
                self.response = random.choice(getattr(responses, phrase_type))
                return True
    return False
def polarity(text):
    """Return the text's sentiment polarity (-1 negative, 1 positive)"""
    return TextBlob(text).sentiment.polarity
def subjectivity(text):
    """Return the text's sentiment subjectivity (0 objective, 1 subjective)"""
    return TextBlob(text).sentiment.subjectivity

# Calculated metrics: name, function that calculates them, and chart's color
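The trailing comment suggests the project registers these two functions in a table of (name, function, chart color) entries. A plausible minimal sketch of such a registry; the METRICS name and the color values are our assumptions:

METRICS = [
    # (metric name, function that computes it, chart color)
    ("polarity", polarity, "#1f77b4"),
    ("subjectivity", subjectivity, "#ff7f0e"),
]

for name, func, color in METRICS:
    print(name, func("TextBlob makes sentiment analysis pleasantly simple."), color)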
def read_resume(file_path):
    resume_data = {}
    resume_data['content'] = TextBlob(read_file(file_path))
    resume_data['skills'] = extract_skills(str(resume_data['content']))
    resume_data['actions'] = extract_actions(resume_data['content'])
    return resume_data
def read_job(file_path):
    """Reads a text file with the job title on the first line and description following"""
    job_data = {}
    job_data['description'] = TextBlob(read_file(file_path))
    job_data['title'] = str(job_data['description']).split('\n')[0]
    job_data['skills'] = extract_skills(str(job_data['description']))
    job_data['noun_phrases'] = extract_nouns(job_data['description'])
    job_data['actions'] = extract_actions(job_data['description'])
    job_data['acronyms'] = extract_acronyms(job_data['description'])
    job_data['value_sentences'] = extract_value_sentences(job_data['description'])
    return job_data
def buscarTermo(termo):
    lista = list()
    bons = 0
    ruins = 0
    polaridades = list()
    twitts = twitter.buscar(termo)
    for twitt in twitts:
        palavras = texto.limparTexto(twitt.text)
        lista = lista + palavras.split()
        analysis = tb(twitt.text)
        polaridades.append(analysis.sentiment.polarity)
        if analysis.sentiment.polarity > 0:
            bons += 1
        else:
            ruins += 1
    count = Counter(lista)
    listagem = list()
    sentimento = list()
    # average polarity across all analyzed tweets
    sentimento.append({'bons': bons, 'ruins': ruins, 'media': np.mean(polaridades)})
    for item in count:
        listagem.append({'text': item, "size": count[item], "repeticao": count[item]})
    id = mongo.salvar(termo, listagem, sentimento)
    rd.salvar(termo, id)
    retorno = {'resultados': listagem, 'sentimento': sentimento}
    return retorno
def sentiment_analysis(sentence):
    sentence = tb(sentence)
    if sentence.sentiment.polarity < -0.3:
        return 'neg'
    elif sentence.sentiment.polarity <= 0.3:
        return 'neutral'
    else:
        return 'pos'
def sentiment(data):
    try:
        blob = TextBlob(data)
        sentiment_value = round(blob.sentiment.polarity, 2)
    except Exception as err:
        print("At sentiment {}".format(err))
        sentiment_value = None
    return sentiment_value
def delete_negative_comment(insta_username):
    # Function logic to delete negative comments
    media_id = get_post_id(insta_username)
    request_url = (BASE_URL + 'media/%s/comments/?access_token=%s') % (media_id, APP_ACCESS_TOKEN)
    print('GET request url : %s\n' % request_url)
    comment_info = requests.get(request_url).json()
    if comment_info['meta']['code'] == 200:
        # check if there are negative comments using textblob's sentiment analyzers
        if len(comment_info['data']):
            for x in range(0, len(comment_info['data'])):
                comment_id = comment_info['data'][x]['id']
                comment_text = comment_info['data'][x]['text']
                blob = TextBlob(comment_text, analyzer=NaiveBayesAnalyzer())
                if blob.sentiment.p_neg > blob.sentiment.p_pos:
                    print('\t(-)Negative comment : %s' % comment_text)
                    delete_url = (BASE_URL + 'media/%s/comments/%s/?access_token=%s') % (media_id, comment_id, APP_ACCESS_TOKEN)
                    print('DELETE request url : %s' % delete_url)
                    delete_info = requests.delete(delete_url).json()
                    if delete_info['meta']['code'] == 200:
                        # check whether the negative comment was actually deleted
                        print('\t\t\t*****Comment successfully deleted!*****\n')
                    else:
                        print('\t\t\t*****Unable to delete comment!*****\n')
        else:
            print('\n\t\t\t*****There are no existing comments on the post!*****')
    else:
        # page not found / non-200 status
        print('\n\t\t\t*****Status code other than 200 received!*****')
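Note: NaiveBayesAnalyzer trains on NLTK's movie_reviews corpus the first time it is used (download the corpora first with python -m textblob.download_corpora), so constructing a fresh analyzer for every comment, as the loop above does, retrains repeatedly. Reusing a single instance is considerably faster:

from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer

analyzer = NaiveBayesAnalyzer()  # trains once, reused for every comment
blob = TextBlob("Great shot, love it!", analyzer=analyzer)
print(blob.sentiment)  # Sentiment(classification='pos', p_pos=..., p_neg=...)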
def run(text):
    return sentiment_polarity(TextBlob(text))