The following 7 code examples, extracted from open-source Python projects, illustrate how to use networkx.average_clustering().
def Attributes_of_Graph(G):
    """Print basic statistical attributes of graph G.

    Reports: node count N, edge count M, average clustering C, degree
    assortativity r, mean degree <k>, maximum degree k_max, degree
    heterogeneity H = <k^2>/<k>^2, and the normalized degree span
    DH = (k_max - k_min)/N.
    """
    print("*Statistic attributes of graphs:")
    print("N", nx.number_of_nodes(G))
    print("M", nx.number_of_edges(G))
    print("C", nx.average_clustering(G))
    #print("<d>", nx.average_shortest_path_length(G))  # expensive on large graphs
    print("r", nx.degree_assortativity_coefficient(G))
    # NOTE: degree_iter() is networkx 1.x API; in networkx >= 2 use G.degree().
    degrees = [deg for _node, deg in G.degree_iter()]
    if not degrees:
        # Empty graph: nothing more to report; avoids ZeroDivisionError below.
        return
    n = float(len(degrees))
    avg_degree = sum(degrees) / n
    second_moment = sum(d * d for d in degrees) / n
    max_degree = max(degrees)
    # BUG FIX: min_degree was initialised to 0 and the update condition
    # `node[1] < min_degree` could never fire for non-negative degrees,
    # so the reported span was always k_max. Use the true minimum.
    min_degree = min(degrees)
    print("<k>", avg_degree)
    print("k_max", max_degree)
    print("H", second_moment / (avg_degree * avg_degree))
    print("DH", float(max_degree - min_degree) / G.number_of_nodes())
#************************************************************************
def Attributes_of_Graph(G):
    """Print basic statistical attributes of graph G.

    Reports: node count N, edge count M, average clustering C, degree
    assortativity r, mean degree <k>, maximum degree k_max, degree
    heterogeneity H = <k^2>/<k>^2, and the average span of the degree
    distribution S = (k_max - k_min)/N.
    """
    print("*Statistic attributes of graphs:")
    print("N", nx.number_of_nodes(G))
    print("M", nx.number_of_edges(G))
    print("C", nx.average_clustering(G))
    #print("<d>", nx.average_shortest_path_length(G))  # expensive on large graphs
    print("r", nx.degree_assortativity_coefficient(G))
    # NOTE: degree_iter() is networkx 1.x API; in networkx >= 2 use G.degree().
    degrees = [deg for _node, deg in G.degree_iter()]
    if not degrees:
        # Empty graph: nothing more to report; avoids ZeroDivisionError below.
        return
    n = float(len(degrees))
    avg_degree = sum(degrees) / n
    second_moment = sum(d * d for d in degrees) / n
    max_degree = max(degrees)
    # BUG FIX: min_degree was initialised to 0 and the update condition
    # `node[1] < min_degree` could never fire for non-negative degrees,
    # so the reported span was always k_max. Use the true minimum.
    min_degree = min(degrees)
    print("<k>", avg_degree)
    print("k_max", max_degree)
    print("H (degree heterogeneity)", second_moment / (avg_degree * avg_degree))
    print("S (average span of degree distribution)", float(max_degree - min_degree) / G.number_of_nodes())
#*******************************************************************
def clustering_coefficient(self):
    """Return the average clustering coefficient of the wrapped graph.

    Returns None when networkx cannot compute it (e.g. the graph is
    empty or otherwise rejected by average_clustering).
    """
    g = self.getG()
    try:
        return nx.average_clustering(g)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit
        # and KeyboardInterrupt; keep the best-effort None fallback but
        # only for ordinary exceptions.
        return None
def main(filename, type, constructed_graph=-1):
    """Compare an original graph with its reconstruction.

    Loads the original graph from data/<filename>/, obtains the
    reconstructed adjacency either from a pickled *.adj file under
    reconstruction/<filename>/<type>/ (when constructed_graph == -1) or
    directly from `constructed_graph`, then plots both degree
    distributions and prints edge counts and average clustering.

    NOTE: parameter `type` shadows the builtin; kept for caller
    compatibility.
    """
    # 1. original graph
    original_graph_path = os.path.join("data", filename, "")
    original_graph = generate_graph(original_graph_path, filename, -1)
    plt.figure("original graph degree distribution")
    draw_degree(original_graph)
    print('original edge number: ', len(original_graph.edges()))

    # 2. reconstructed graph
    if constructed_graph == -1:
        reconstruct_graph_path = os.path.join("reconstruction", filename, type, "")
        adj_file = glob.glob(reconstruct_graph_path + "*.adj")[0]
        # BUG FIX: pickle.load(open(...)) never closed the file handle;
        # a context manager releases it even if unpickling fails.
        with open(adj_file, 'rb') as fh:
            reconstruct_graph_adj = pickle.load(fh)
    else:
        reconstruct_graph_adj = constructed_graph
    reconstruct_graph = adj2Graph(reconstruct_graph_adj, edgesNumber=len(original_graph.edges()))
    print('edge number: ', len(reconstruct_graph.edges()))
    plt.figure("reconstruct graph degree distribution")
    draw_degree(reconstruct_graph)
    print("Clustering: ", nx.average_clustering(original_graph), ' ', nx.average_clustering(reconstruct_graph))
    # print("Diameter: ", nx.average_shortest_path_length(original_graph), ' ', nx.average_shortest_path_length(reconstruct_graph))
    # print("degree centrality: ", nx.degree_centrality(original_graph), ' ', nx.degree_centrality(reconstruct_graph))
    #print("closeness centrality: ", nx.closeness_centrality(original_graph), ' ', nx.closeness_centrality(reconstruct_graph))
    plt.show()
def run(self):
    """Simulate a small peer-to-peer overlay and report its topology metrics.

    Creates one Client per (ip, port) pair, connects each new client to
    the bootstrap client (clients[0]) and to the bootstrapper's known
    peers, then builds a networkx graph of all connections, saves a
    drawing to path_graph.pdf and prints connectivity, average shortest
    path length, clustering and assortativity statistics.
    """
    ip_addresses = ['192.168.1.%s' % x for x in range(1, self._number_clients)]
    ports = [x for x in range(1, 2)]  # currently a single port (1) per host
    clients = []
    progress = 0
    for ip_addr in ip_addresses:
        print_progress(progress, self._number_clients, suffix="Running simulation")
        for port in ports:
            progress += 1
            # The first client has no bootstrapper (None); it connects to itself below.
            client = Client(ip_addr, port, clients[0] if len(clients) > 0 else None,
                            max_chache_size=self._number_connections_per_client)
            clients.append(client)
            connection = Connection(client, clients[0])
            connection.initiate()
            # Also connect the new client to every peer the bootstrapper knows.
            bootstrapper_connections = clients[0].get_connections()
            for conn in bootstrapper_connections:
                connection = Connection(client, conn.second_client)
                connection.initiate()
    # BUG FIX: was `networkx.nx.Graph()` — the networkx package has no `nx`
    # attribute; construct the Graph directly.
    graph = networkx.Graph()
    for client in clients:
        logging.error(client.get_ident())
        logging.error(client.get_connection_idents())
        for node in client.get_connections():
            graph.add_edge(node.first_client.get_ident(), node.second_client.get_ident())
    networkx.draw(graph, with_labels=False)
    plt.savefig("path_graph.pdf")
    print("Network is connected: %s" % networkx.is_connected(graph))
    print("Average shortest path length: %s" % networkx.average_shortest_path_length(graph))
    # BUG FIX: labels claimed "bipartite" clustering, but the code computes
    # ordinary (non-bipartite) clustering; corrected the messages (and typo).
    print("Average clustering coefficient %s" % networkx.average_clustering(graph))
    print("Clustering coefficient %s" % networkx.clustering(graph))
    print("degree_assortativity_coefficient %s" % networkx.degree_assortativity_coefficient(graph))
def structure_dependent_index(G, ebunch=None):
    """Score node pairs by a structure-dependent path-counting index.

    For each pair (u, v) in `ebunch` (all non-edges of G by default),
    counts the simple paths between u and v of length up to the rounded
    average shortest path length, then sums the counts damped
    exponentially by path length. Paths of 2 nodes (the direct edge
    itself) are excluded from the score.

    Returns a list of (u, v, score) triples.
    """
    if ebunch is None:
        ebunch = nx.non_edges(G)
    # Cap the path search at (roughly) the graph's characteristic path length,
    # but never below 2. average_shortest_path_length requires a connected G.
    path_range = max(2, math.ceil(nx.average_shortest_path_length(G)))

    def predict(u, v):
        # Histogram of simple-path lengths (len(path) counts nodes, not edges).
        # BUG FIX: dict.has_key was removed in Python 3; use dict.get.
        length_counts = {}
        for path in nx.all_simple_paths(G, source=u, target=v, cutoff=path_range):
            length_counts[len(path)] = length_counts.get(len(path), 0.0) + 1.0
        # print(length_counts)  # debug output disabled
        # Longer paths contribute less: weight = Coefficient**(length - 2).
        coefficient = 0.6
        score = 0.0
        for length in sorted(length_counts):
            if length != 2:
                score += math.pow(coefficient, length - 2.0) * length_counts[length]
        return score

    return [(u, v, predict(u, v)) for u, v in ebunch]
##======================================================================##
def Prediction_LinkScores_Ratio(G, Predictor, Proportion, Toleration, Predict_Gap):
    """Collect link-prediction scores and graph statistics for G.

    Parameters
    ----------
    G : connected networkx graph (returns None if G is not connected).
    Predictor : predictor identifier forwarded to Link_Predictors.
    Proportion : |E| / Proportion bounds how many edges the (currently
        disabled) edge-deletion loop may remove.
    Toleration : max consecutive failed deletions before the disabled
        loop would stop.
    Predict_Gap : sampling interval of the disabled loop.

    Returns
    -------
    dict mapping iteration count -> [prediction result, average
    clustering, average shortest path length]; with the loop disabled,
    only entry 0 (the intact graph) is present.
    """
    print("Prediction_LinkScores_Ratio!")
    Rank_List_Set = {}
    OK_Value = float(G.number_of_edges()) / Proportion
    if nx.is_connected(G):
        # NOTE(review): data='True' passes the *string* 'True' as an
        # edge-data key; data=True (boolean) was probably intended —
        # confirm with callers before changing.
        Edge_Set = G.edges(data='True')
        Total = 0
        Error = 0
        # Entry 0: scores for the intact graph.
        # average_shortest_path_length can be expensive on large graphs.
        Rank_List_Set[0] = [Link_Predictors.Wighted_Link_Prediction(Predictor, G),
                            nx.average_clustering(G),
                            nx.average_shortest_path_length(G)]
        # The edge-deletion sampling loop below is intentionally disabled
        # (kept as a string literal, preserved from the original).
        '''
        while 1:
            #print i, len(Edge_Set),
            Tep_Edge = []
            Del = random.randint(0, len(Edge_Set)-1)
            Tep_Edge.append(Edge_Set[Del])
            #print "random range:", len(Edge_Set)-1
            #print Del,
            #Prediction with different training set
            G.remove_edge(Edge_Set[Del][0], Edge_Set[Del][1])
            if nx.is_connected(G) != True:
                G.add_edges_from(Tep_Edge)
                Error = Error + 1
                #print "Error:", Error
            else:
                #print Edge_Set[Del],
                Error = 0
                Total = Total + 1
                #print "Total:", Total
                if Total % Predict_Gap == 0:
                    V1 = Link_Predictors.Wighted_Link_Prediction(Predictor, G)
                    V2 = nx.average_clustering(G)
                    V3 = nx.average_shortest_path_length(G)
                    #V4 = Performance_Evaluation_AUC(Predictor, G, Probe_Set, Non_existing_links)
                    Rank_List_Set[Total] = [V1, V2, V3]
                    Edge_Set = G.edges(data='True')
                #end if
            if Total > OK_Value or Error == Toleration:
                #print "complete with Total, Error:", Total, Error
                return Rank_List_Set
        #end while
        '''
        return Rank_List_Set
    #end if
    # Not connected: falls through and returns None (original behavior).
    #return Rank_List_Set
##==========================================================================================
#Native_Prediction_Experiment(G, 'WSD', Probe_Set, Top_L, 3)   #Top_K, Deleted_Ratio