欢迎来到RankFan的Blogs

扩大
缩小

Networkx 常用

network statistics

# --- Basic network statistics -------------------------------------------
print('* ' * 30)
print('network statistics')
# NOTE(review): nx.info() was removed in networkx 3.0 (use print(G) there);
# confirm which networkx version this script targets.
print(nx.info(G))
print(nx.is_connected(G))
components = nx.connected_components(G)  # lazy generator of node sets, one per component
print('num of connected_components:', nx.number_connected_components(G))

# Transitivity = 3 * (#triangles) / (#connected triples), a global clustering measure.
triadic_closure = nx.transitivity(G)
print("Triadic closure:", triadic_closure)

n = len(G.nodes)  # node count; reused below to normalise the degree histogram
# presumably returns (node, score) lists sorted by degree / eigenvector /
# betweenness centrality — TODO confirm against nf.nx_statics' definition.
sorted_degree, sorted_eigenvector, sorted_betweenness = nf.nx_statics(G)

1 degree distribution

# Degree distribution p_k: fraction of nodes having degree k.
# Reference: https://mp.weixin.qq.com/s/-41OzWUbELGO5zjFD0-TgA
degrees = [deg for _, deg in sorted_degree]
mean_degree = np.sum(degrees) / n
x = list(range(max(degrees) + 1))
y = [count / n for count in nx.degree_histogram(G)]

plt.plot(x, y, 'ro-')
plt.xlabel("$k$")
plt.ylabel("$p_k$")
plt.show(block=True)
# Log-log coordinates for eyeballing a power law (log_axis defined elsewhere).
log_x, log_y = log_axis(x, y)

2 diameter_g

# --- Global distance / efficiency measures ------------------------------
diameter_g = nx.diameter(G)  # longest shortest path; raises NetworkXError on a disconnected graph
local_efficiency = nx.local_efficiency(G)
global_efficiency = nx.global_efficiency(G)
average_shortest_path_length = nx.average_shortest_path_length(G)

print('diameter: ', diameter_g, '\n',
      'local_efficiency: ', local_efficiency, '\n',
      'global_efficiency: ', global_efficiency, '\n',
      'average_shortest_path_length: ', average_shortest_path_length, )

3 Clustering coefficient

# Equivalent manual computation of the average clustering coefficient:
# clustering = np.mean(list(nx.clustering(G).values()))
average_clustering = nx.average_clustering(G)  # average (per-node) clustering coefficient
transitivity = nx.transitivity(G)  # global clustering coefficient (transitivity)

4 度-度相关性

# --- Degree-degree correlation (assortativity) --------------------------
# Reference: https://mp.weixin.qq.com/s/XgVYyxA9LQZiQV1lwbdOIQ
# Pearson-based degree-degree correlation, i.e. the assortativity coefficient.
degree_ac = nx.degree_assortativity_coefficient(G)
pearson_r = nx.degree_pearson_correlation_coefficient(G)
# k_nn_k[i] is the average nearest-neighbour degree of nodes of degree sorted_k[i].
sorted_k, k_nn_k = average_nearest_neighbor_degree(G)

fig, ax = plt.subplots(1, 1, figsize=(8, 6))
# BUG FIX: the original plotted sorted_k against itself (always a straight
# diagonal); the y-axis labelled $k_{nn}(k)$ must plot k_nn_k.
plt.plot(sorted_k, k_nn_k, 'gv', label='pearson correlation = ' + '%.3f' % pearson_r)
plt.legend(loc=0)
plt.xlabel("$k$")
plt.ylabel("$k_{nn}(k)$")
# plt.xscale("log")
# plt.yscale("log")
plt.title('celegans_metabolic')
plt.ylim([1, 100])

plt.tight_layout()
plt.show(block=True)

5. 介数

 # 节点介数
bc = nx.betweenness_centrality(G)
sorted_bc = sorted(bc.items(), key=itemgetter(1), reverse=True)
print('max bc id: ', sorted_bc[0])

# 边介数
ebc = nx.edge_betweenness_centrality(G)
sorted_ebc = sorted(ebc.items(), key=itemgetter(1), reverse=True)
print('max bc id: ', sorted_ebc[0])

# 核度
ks = nx.core_number(G)
sorted_ks = sorted(ks.items(), key=itemgetter(1), reverse=True)
print('max bc id: ', sorted_ks[0])

# 网络密度
density = nx.density(G)
# print(nx.density(kcg))

6. 中心性指标

# --- Centrality measures and a 2x2 comparison plot ----------------------
# Degree centrality.
dc = nx.degree_centrality(G)
# Betweenness centrality.
bc = nx.betweenness_centrality(G)
# Closeness centrality.
cc = nx.closeness_centrality(G)
# Eigenvector centrality.
ec = nx.eigenvector_centrality(G)

# The four subplot stanzas were byte-identical except for the data and labels,
# so they are folded into one loop (same subplots, same order, same output).
plt.figure(figsize=(10, 10))
centralities = [
    (dc, 'dc', 'degree_centrality'),
    (bc, 'bc', 'betweenness_centrality'),
    (cc, 'cc', 'closeness_centrality'),
    (ec, 'ec', 'eigenvector_centrality'),
]
for pos, (measure, short_name, title) in enumerate(centralities, start=1):
    plt.subplot(2, 2, pos)
    plt.plot(measure.keys(), measure.values(), 'ro', label='ER')
    plt.legend(loc=0)
    plt.xlabel("node label")
    plt.ylabel(short_name)
    plt.title(title)

7 社区检测

# --- Community detection (Louvain) --------------------------------------
# NOTE(review): `community` here is the python-louvain package — confirm it is
# installed (it is not part of networkx itself).
import community
# Modularity-maximising partition: maps node -> community id.
communities = community.best_partition(G)
nx.set_node_attributes(G, communities, 'modularity')
# NOTE(review): this lookup's result is discarded, and nothing visible in this
# chunk ever sets an 'eigenvector' node attribute — presumably done earlier in
# the file; verify, otherwise the dict comprehension below raises KeyError.
nx.get_node_attributes(G, 'eigenvector')

# First get a list of just the nodes in that class
class0 = [n for n in G.nodes() if communities[n] == 0]
# Then create a dictionary of the eigenvector centralities of those nodes
class0_eigenvector = {n: G.nodes[n]['eigenvector'] for n in class0}
# Then sort that dictionary and print the first 5 results
class0_sorted_by_eigenvector = sorted(class0_eigenvector.items(), key=itemgetter(1), reverse=True)

posted on 2023-10-21 09:00  RankFan  阅读(12)  评论(0编辑  收藏  举报

导航