The following 9 code examples, extracted from open-source Python projects, illustrate how to use networkx.number_of_edges().
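Before the project code, here is a minimal, self-contained sketch of the call itself (the toy graph is made up purely for illustration): nx.number_of_edges(G) returns the number of edges in G and is equivalent to the method form G.number_of_edges().

import networkx as nx

# A small, made-up graph purely to illustrate the call.
G = nx.Graph()
G.add_edges_from([(1, 2), (2, 3), (3, 1)])

print(nx.number_of_edges(G))   # 3 -- same result as G.number_of_edges()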
def save_graph(self, graphname, fmt='gpickle'):
    """
    Saves the graph to disk

    **Positional Arguments:**
            graphname:
                - Filename for the graph

    **Optional Arguments:**
            fmt:
                - Output graph format
    """
    self.g.graph['ecount'] = nx.number_of_edges(self.g)
    if fmt == 'gpickle':
        nx.write_gpickle(self.g, graphname)
    elif fmt == 'graphml':
        nx.write_graphml(self.g, graphname)
    else:
        raise ValueError("'gpickle' and 'graphml' are the only formats currently supported")
def Euler_Tour(multigraph):
    """ Uses Fleury's algorithm to find the Euler Tour of the MultiGraph. """
    tour = []
    temp_graph = nx.MultiGraph()
    graph_nodes = nx.nodes(multigraph)
    current_node = graph_nodes[0]
    tour.append(current_node)
    while nx.number_of_edges(multigraph) > 0:
        # Prefer an edge whose removal keeps the graph connected (i.e. not a bridge).
        for edge in multigraph.edges(current_node):
            temp_graph = copy.deepcopy(multigraph)
            temp_graph.remove_edge(edge[0], edge[1], key=None)
            if nx.is_connected(temp_graph):
                tour.append(edge[1])
                current_node = edge[1]
                multigraph.remove_edge(edge[0], edge[1], key=None)
                break
        else:
            # Every remaining edge is a bridge, so take it and drop isolated nodes.
            tour.append(edge[1])
            current_node = edge[1]
            multigraph.remove_edge(edge[0], edge[1], key=None)
            multigraph.remove_nodes_from(nx.isolates(multigraph))
    return tour
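A hypothetical invocation of Euler_Tour, assuming networkx 1.x (where nx.nodes() returns an indexable list and nx.isolates() returns a list, as the function requires) and the copy and networkx imports the snippet relies on; the triangle multigraph is made up for illustration:

import copy
import networkx as nx

# Every node of a triangle has even degree, so an Euler tour exists.
M = nx.MultiGraph()
M.add_edges_from([(0, 1), (1, 2), (2, 0)])

print(Euler_Tour(M))   # e.g. [0, 1, 2, 0]; note the function consumes (empties) M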
def create_graph(self):
    path = ""
    # Read the crawled repo-topic CSV with pandas to build the SNA graph.
    data = pd.read_csv(path + '../../worldoss:ocean/Web_Crawler/generated_repo_topic_data.csv',
                       error_bad_lines=False, header=None, sep=",", delimiter='\n')

    # Creating node list
    node = []
    for i in data.values:
        for j in i[0].split(',')[1:]:
            node.append(j)
    node = list(set(node))

    # Creating edge list
    self.edges = []
    for i in data.values:
        l = i[0].split(',')[1:]
        for j in range(len(l)):
            for k in range(j + 1, len(l)):
                self.edges.append((l[j], l[k]))

    self.G = nx.Graph()
    self.G.add_nodes_from(node)
    self.G.add_edges_from(self.edges)
    print nx.number_of_nodes(self.G)
    print nx.number_of_edges(self.G)
def centrality(self):
    with open('community_test.csv', 'rU') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            cluster = row[1:]
            edges = []
            print cluster
            # Keep only the edges whose both endpoints lie inside this cluster.
            for i in self.edges:
                for j in cluster:
                    if i[0] == j:
                        for k in cluster:
                            if i[1] == k:
                                edges.append(i)
                    if i[1] == j:
                        for k in cluster:
                            if i[0] == k:
                                edges.append(i)
            C = nx.Graph()
            C.add_nodes_from(cluster)
            C.add_edges_from(edges)
            node_count = nx.number_of_nodes(C)
            edge_count = nx.number_of_edges(C)
            print node_count, edge_count
            cent = self.degree_centrality_custom(C)
            print cent
            with open('centrality_test.csv', 'a') as outfile:
                writer = csv.writer(outfile)
                writer.writerow(['Community ' + row[0], 'Node: ' + str(node_count), 'Edge: ' + str(edge_count)])
                for i, j in cent.items():
                    writer.writerow([i, j])
            print 'Finished Community ' + row[0]
def Attributes_of_Graph(G):
    print "*Statistical attributes of the graph:"
    print "N", nx.number_of_nodes(G)
    print "M", nx.number_of_edges(G)
    print "C", nx.average_clustering(G)
    #print "<d>", nx.average_shortest_path_length(G)
    print "r", nx.degree_assortativity_coefficient(G)
    degree_list = list(G.degree_iter())
    max_degree = 0
    min_degree = float('inf')   # start high so the first node's degree becomes the minimum
    avg_degree_1 = 0.0
    avg_degree_2 = 0.0
    for node in degree_list:
        avg_degree_1 = avg_degree_1 + node[1]
        avg_degree_2 = avg_degree_2 + node[1]*node[1]
        if node[1] > max_degree:
            max_degree = node[1]
        if node[1] < min_degree:
            min_degree = node[1]
    #end for
    avg_degree = avg_degree_1/len(degree_list)
    avg_degree_square = (avg_degree_2/len(degree_list)) / (avg_degree*avg_degree)   # H = <k^2>/<k>^2
    print "<k>", avg_degree
    print "k_max", max_degree
    print "H", avg_degree_square
    print "DH", float(max_degree-min_degree)/G.number_of_nodes()
#************************************************************************
def Attributes_of_Graph(G):
    print "*Statistical attributes of the graph:"
    print "N", nx.number_of_nodes(G)
    print "M", nx.number_of_edges(G)
    print "C", nx.average_clustering(G)
    #print "<d>", nx.average_shortest_path_length(G)
    print "r", nx.degree_assortativity_coefficient(G)
    degree_list = list(G.degree_iter())
    max_degree = 0
    min_degree = float('inf')   # start high so the first node's degree becomes the minimum
    avg_degree_1 = 0.0
    avg_degree_2 = 0.0
    for node in degree_list:
        avg_degree_1 = avg_degree_1 + node[1]
        avg_degree_2 = avg_degree_2 + node[1]*node[1]
        if node[1] > max_degree:
            max_degree = node[1]
        if node[1] < min_degree:
            min_degree = node[1]
    #end for
    avg_degree = avg_degree_1/len(degree_list)
    avg_degree_square = (avg_degree_2/len(degree_list)) / (avg_degree*avg_degree)   # H = <k^2>/<k>^2
    print "<k>", avg_degree
    print "k_max", max_degree
    print "H (degree heterogeneity)", avg_degree_square
    print "S (average span of degree distribution)", float(max_degree-min_degree)/G.number_of_nodes()
#*******************************************************************
def Sum_of_weight(G):
    #nx.number_of_edges(nx.ego_graph(Hub_ego,n,1))
    EdgeList = G.edges(data=True)   #[(0, 1, {}), (1, 2, {}), (2, 3, {})]
    #print EdgeList
    Sum_of_weight = 0.0
    for edge in EdgeList:
        Sum_of_weight = Sum_of_weight + edge[2]['weight']
        #weight=string.atof(line[3]), timestamp=string.atof(line[5])
    #end for
    return Sum_of_weight
def Prediction_LinkScores_Ratio(G, Predictor, Proportion, Toleration, Predict_Gap):
    print "Prediction_LinkScores_Ratio!"
    Rank_List_Set = {}
    OK_Value = float(G.number_of_edges())/Proportion
    if nx.is_connected(G):
        Edge_Set = G.edges(data=True)
        Total = 0
        Error = 0
        Rank_List_Set[0] = [Link_Predictors.Wighted_Link_Prediction(Predictor, G),
                            nx.average_clustering(G),
                            nx.average_shortest_path_length(G)]   ##Running time!!! (average_shortest_path_length is expensive)
        '''
        while 1:
            #print i, len(Edge_Set),
            Tep_Edge = []
            Del = random.randint(0, len(Edge_Set)-1)
            Tep_Edge.append(Edge_Set[Del])
            #print "random range:", len(Edge_Set)-1
            #print Del,
            #Prediction with different training set
            G.remove_edge(Edge_Set[Del][0], Edge_Set[Del][1])
            if nx.is_connected(G) != True:
                G.add_edges_from(Tep_Edge)
                Error = Error + 1
                #print "Error:", Error
            else:
                #print Edge_Set[Del],
                Error = 0
                Total = Total + 1
                #print "Total:", Total
                if Total%Predict_Gap == 0:
                    V1 = Link_Predictors.Wighted_Link_Prediction(Predictor, G)
                    V2 = nx.average_clustering(G)
                    V3 = nx.average_shortest_path_length(G)
                    #V4 = Performance_Evaluation_AUC(Predictor, G, Probe_Set, Non_existing_links)
                    Rank_List_Set[Total] = [V1, V2, V3]
                    Edge_Set = G.edges(data=True)
                #end if
            if Total > OK_Value or Error == Toleration:
                #print "complete with Total, Error:", Total, Error
                return Rank_List_Set
        #end while
        '''
        return Rank_List_Set
    #end if
    #return Rank_List_Set
##==========================================================================================

#Native_Prediction_Experiment(G, 'WSD', Probe_Set, Top_L, 3)   #Top_K, Deleted_Ratio
def Prediction_Experiment(G, Predictor, Probe_Set, Top_L, Deleted_Ratio):
    print "Prediction_Experiment!"

    #Get Evaluation Link Set--------
    #Top_L = (G.number_of_edges() - 0) / Top_k   #The top proportion 1/Top_k of edges are considered
    #Probe_Set = Probe_Set_Correspond_Training(G, Top_L, fpname)   #****Get the probe set for evaluation*****

    #Get Ranking List with different deleted links ratio----------
    Edge_Num = float(G.number_of_edges())
    '''AUC = Performance_Evaluation_AUC(Predictor, G, Probe_Set)'''
    Unobserved_links = nx.non_edges(G)
    Non_existing_links = list(set(Unobserved_links).difference(set(Probe_Set)))
    AUC = Performance_Evaluation_AUC(Predictor, G, Probe_Set, Non_existing_links)

    Rank_List_Set = Prediction_LinkScores_Ratio(G, Predictor, Deleted_Ratio, 50, 30)
    #Prediction_LinkScores_Ratio(G, Predictor, Proportion, Toleration, Predict_Gap)

    #----Performance Evaluation with Precision under different Training Data Ratio----
    Precision_Set = []
    X_Set = []
    Coefficient_Set = []
    Avg_PathLen_Set = []
    for key in sorted(Rank_List_Set.keys()):
        Rank_List_Sorted = sorted(Rank_List_Set[key][0], key=lambda edge: edge[2], reverse=True)
        Top_L_Rank_List = Rank_List_Sorted[0:Top_L]
        Coefficient_Set.append(Rank_List_Set[key][1])
        Avg_PathLen_Set.append(Rank_List_Set[key][2])
        #AUC_Set.append(Rank_List_Set[key][3])
        #print key, Performance_Evaluation_Precision(Top_L_Rank_List, Probe_Set)
        X_Set.append(float(key)/Edge_Num)
        Precision_Set.append(Performance_Evaluation_Precision(Top_L_Rank_List, Probe_Set))
        '''
        #Draw Curve Graph
        if key%100 == 0:
            data = []
            for edge in Rank_List_Sorted:
                data.append(edge[2])
            matploit(data)
        '''
    #end for
    print "*Different deleted links ratio:", X_Set
    print "*Precision_Set with different deleted links ratio:", Precision_Set
    print "*Coefficient_Set:", Coefficient_Set
    print "*Avg_PathLen_Set:", Avg_PathLen_Set
    print "*AUC Value:", AUC
    return 1

#def Native_Prediction_Experiment(G, Predictor, Probe_Set, Top_L, Deleted_Ratio):