1. Biplot
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to zero mean and unit std; StandardScaler works too
# In general, it's a good idea to scale the data prior to PCA.
# scaler = StandardScaler()
# scaler.fit(data)
# data = scaler.transform(data)
pca = PCA()
x_new = pca.fit_transform(data)
def myplot(score, coeff, labels=None):
    xs = score[:, 0]
    ys = score[:, 1]
    n = coeff.shape[0]
    scalex = 1.0 / (xs.max() - xs.min())
    scaley = 1.0 / (ys.max() - ys.min())
    plt.scatter(xs * scalex, ys * scaley, c=y)
    for i in range(n):
        plt.arrow(0, 0, coeff[i, 0], coeff[i, 1], color='r', alpha=1,
                  head_width=0.04, head_length=0.03, overhang=1)
        if labels is None:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, "Var" + str(i + 1), color='g', ha='center', va='center')
        else:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, labels[i], color='g', ha='center', va='center')
    plt.xlim(-1, 1)
    plt.ylim(-1, 1)
    plt.xlabel("PC1")
    plt.ylabel("PC2")
    plt.grid()
# Call the function, using only the first 2 PCs. Only the first 4 labels are
# consumed for iris; the 10-element list is left over from the random-data example.
myplot(x_new[:, 0:2], np.transpose(pca.components_[0:2, :]),
       ["a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8", "a9", "a10"])
plt.show()
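The arrows above are the raw eigenvector entries from pca.components_. A common variant rescales each arrow by the square root of its component's variance, so that (for standardised data) the arrows show loadings, i.e. approximate correlations between each variable and the PCs. A minimal sketch, reusing the pca, x_new, iris, and myplot objects from the script above:

# Sketch: scale eigenvector entries into loadings before plotting
loadings = pca.components_.T * np.sqrt(pca.explained_variance_)
myplot(x_new[:, 0:2], loadings[:, 0:2], list(iris.feature_names))
plt.show()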
With a legend
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to zero mean and unit std; StandardScaler works too
# In general, it's a good idea to scale the data prior to PCA.
# scaler = StandardScaler()
# scaler.fit(data)
# data = scaler.transform(data)
pca = PCA()
x_new = pca.fit_transform(data)
def myplot(score, coeff, labels=None):
    xs = score[:, 0]
    ys = score[:, 1]
    n = coeff.shape[0]
    scalex = 1.0 / (xs.max() - xs.min())
    scaley = 1.0 / (ys.max() - ys.min())
    for i in range(3):  # iris has 3 classes
        plt.scatter(xs[y == i] * scalex,
                    ys[y == i] * scaley,
                    linewidth=0.01, label=i)
    for i in range(n):
        plt.arrow(0, 0, coeff[i, 0], coeff[i, 1], color='r', alpha=1,
                  head_width=0.04, head_length=0.03, overhang=1)
        if labels is None:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, "Var" + str(i + 1), color='g', ha='center', va='center')
        else:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, labels[i], color='g', ha='center', va='center')
    plt.xlim(-1, 1)
    plt.ylim(-1, 1)
    plt.xlabel("PC1")
    plt.ylabel("PC2")
    plt.grid()
# Call the function, using only the first 2 PCs. Only the first 4 labels are
# consumed for iris; the 10-element list is left over from the random-data example.
myplot(x_new[:, 0:2], np.transpose(pca.components_[0:2, :]),
       ["a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8", "a9", "a10"])
plt.legend()
plt.show()
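To show the iris class names in the legend rather than the integer codes 0-2, the class loop inside myplot can be swapped for the fragment below. This is a drop-in replacement, not a standalone script; it assumes the global iris Bunch loaded above:

for i in range(3):
    plt.scatter(xs[y == i] * scalex,
                ys[y == i] * scaley,
                linewidth=0.01, label=iris.target_names[i])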
Marking the 95% confidence ellipse
from matplotlib.patches import Ellipse
from sklearn.decomposition import PCA
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
def plot_point_cov(points, nstd=3, ax=None, **kwargs):
    # Use the mean of all points as the centre of the confidence ellipse
    pos = points.mean(axis=0)
    # Covariance of the points (rows are observations)
    cov = np.cov(points, rowvar=False)
    return plot_cov_ellipse(cov, pos, nstd, ax, **kwargs)

def plot_cov_ellipse(cov, pos, nstd=3, ax=None, **kwargs):
    # nstd is the ellipse radius in standard deviations (3 draws a 3-sigma
    # ellipse; see the sketch after this script for a true 95% region)
    def eigsorted(cov):
        cov = np.array(cov)
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]
    if ax is None:
        ax = plt.gca()
    vals, vecs = eigsorted(cov)
    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    width, height = 2 * nstd * np.sqrt(vals)
    ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
    ax.add_artist(ellip)
    return ellip
# Draw the confidence ellipses
def show_ellipse(X_pca, y, pca, feature_label=None):
    # Colours for each class
    colors = ['tab:blue', 'tab:orange', 'seagreen']
    # Placeholder class names carried over from the source paper; for iris
    # these would be ['setosa', 'versicolor', 'virginica']
    category_label = ['Ethiopia', 'Somalia', 'Kenya']
    # Figure size and resolution
    plt.figure(dpi=100, figsize=(8, 6))
    xs = X_pca[:, 0]
    ys = X_pca[:, 1]
    scalex = 1.0 / (xs.max() - xs.min())
    scaley = 1.0 / (ys.max() - ys.min())
    xs = xs * scalex
    ys = ys * scaley
    data = np.concatenate((xs[:, None], ys[:, None]), 1)
    for i in range(max(y) + 1):  # max(y)+1 classes, i.e. 3 for iris
        plt.plot(data[:, 0][y == i], data[:, 1][y == i], '.', color=colors[i], label=category_label[i], markersize=8)
        plot_point_cov(data[y == i], nstd=3, alpha=0.25, color=colors[i])
    plt.plot([0, 0], [-1, 1], '--', lw=1, color='#cccccc')
    plt.plot([-1, 1], [0, 0], '--', lw=1, color='#cccccc')
    coeff = np.transpose(pca.components_[0:2, :])
    for i in range(coeff.shape[0]):
        plt.arrow(0, 0, coeff[i, 0], coeff[i, 1], color='r', alpha=1,
                  head_width=0.04, head_length=0.03, overhang=1)
        if feature_label is None:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, "Var" + str(i + 1), color='g', ha='center', va='center')
        else:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, feature_label[i], color='g', ha='center', va='center')
    # Axis limits and labels
    plt.xlim(-1, 1)
    plt.ylim(-1, 1)
    plt.xticks(size=10, family='Times New Roman')
    plt.yticks(size=10, family='Times New Roman')
    font = {'family': 'Times New Roman', 'size': 10}
    plt.xlabel('PC1 ({} %)'.format(round(pca.explained_variance_ratio_[0] * 100, 2)), font)
    plt.ylabel('PC2 ({} %)'.format(round(pca.explained_variance_ratio_[1] * 100, 2)), font)
    plt.legend(prop={"family": "Times New Roman", "size": 8}, loc='upper right')
    plt.show()
if __name__ == '__main__':
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)  # scale each variable to zero mean and unit std; StandardScaler works too
    pca = PCA()
    x_new = pca.fit_transform(X)
    show_ellipse(x_new, y, pca)
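Note that nstd=3 above actually draws a 3-sigma ellipse (about 98.9% coverage for a 2-D Gaussian), not the 95% region the heading promises. For a true 95% ellipse, the radius in standard deviations comes from the chi-square quantile with 2 degrees of freedom; a minimal sketch:

import numpy as np
from scipy.stats import chi2

# Radius (in standard deviations) enclosing 95% of a 2-D Gaussian
nstd_95 = np.sqrt(chi2.ppf(0.95, df=2))   # roughly 2.45
# Then pass it through inside show_ellipse, e.g.:
# plot_point_cov(data[y == i], nstd=nstd_95, alpha=0.25, color=colors[i])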
2. Scree plot
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to zero mean and unit std; StandardScaler works too
# Bootstrap resampling
np.random.seed(0)
n = data.shape[0]
sample = data[np.random.randint(0, n, n)]  # resample all rows with replacement (the original hard-coded 100, skipping iris rows 100-149)
var = []
for i in range(500):
    sample_n = sample[np.random.randint(0, n, n)]
    pca = PCA()
    pca.fit(sample_n)
    var.append(pca.explained_variance_ratio_)
var = np.array(var)
plt.errorbar(np.linspace(1, data.shape[1], data.shape[1]), np.mean(var, axis=0), yerr=np.std(var, axis=0),
             lw=2, elinewidth=1.5, ms=5, capsize=3, fmt='b-o')  # in 'b-o', the letter sets the line colour and 'o' the marker
# print(pca.components_)
# print(pca.explained_variance_ratio_)
# print(np.mean(pca.components_,axis=1).sum())
# plt.plot(pca.explained_variance_ratio_,marker='o')
# plt.legend()
plt.show()
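A common companion to the scree plot is the cumulative explained variance curve, used to decide how many components to retain. A minimal sketch, fitting PCA once on the full standardised data rather than on bootstrap samples (reuses data, np, plt, and PCA from the script above):

# Cumulative explained variance of a single PCA fit
pca_full = PCA().fit(data)
cum = np.cumsum(pca_full.explained_variance_ratio_)
plt.plot(range(1, len(cum) + 1), cum, marker='o')
plt.axhline(0.95, ls='--', color='gray')  # e.g. keep enough PCs for 95% of the variance
plt.xlabel("number of components")
plt.ylabel("cumulative explained variance")
plt.show()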
With jittered scatter points
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
data = np.random.random((1000,10))
y = np.random.randint(0,6,1000)
# iris = datasets.load_iris()
# data = iris.data
# y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to zero mean and unit std; StandardScaler works too
# Bootstrap resampling
np.random.seed(0)
n = data.shape[0]
sample = data[np.random.randint(0, n, n)]  # resample all rows with replacement (the original hard-coded 100)
var = []
for i in range(500):
    sample_n = sample[np.random.randint(0, n, n)]
    pca = PCA()
    pca.fit(sample_n)
    var.append(pca.explained_variance_ratio_)
var = np.array(var)
plt.errorbar(np.linspace(1, data.shape[1], data.shape[1]), np.mean(var, axis=0), yerr=np.std(var, axis=0),
             lw=2, elinewidth=1.5, ms=5, capsize=5, fmt='b-o')  # in 'b-o', the letter sets the line colour and 'o' the marker
# Draw the jittered scatter points
x_jittered = np.random.uniform(-0.1,0.1,size=var.shape[0]*var.shape[1])
cc = np.repeat(np.linspace(1, data.shape[1], data.shape[1]),var.shape[0])+x_jittered
plt.scatter(cc,var.T.reshape(-1),c="#cccccc",marker=".",alpha=0.5,linewidths=0)
"""
# 或者這樣也可以
for i, d in enumerate(var.T):
x_ = (i+1)+np.random.uniform(-0.1, 0.1, size=var.shape[0])
plt.scatter(x_, d, c="#cccccc", marker=".", alpha=0.5, linewidths=0)
"""
# print(pca.components_)
# print(pca.explained_variance_ratio_)
# print(np.mean(pca.components_,axis=1).sum())
# plt.plot(pca.explained_variance_ratio_,marker='o')
# plt.legend()
plt.show()
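If pandas and seaborn are available, a stripplot handles the jitter automatically. A sketch assuming the var array from the script above (the column names here are arbitrary):

import pandas as pd
import seaborn as sns

# One row per bootstrap replicate and component; melt to long format
df = pd.DataFrame(var, columns=range(1, var.shape[1] + 1)) \
       .melt(var_name="PC", value_name="explained variance ratio")
sns.stripplot(data=df, x="PC", y="explained variance ratio",
              color="#cccccc", alpha=0.5, jitter=0.1)
plt.show()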
3. Variable loading plot
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to zero mean and unit std; StandardScaler works too
# Fix the random seed (no resampling happens in this script)
np.random.seed(0)
pca = PCA()
x_new = pca.fit_transform(data)
# Draw the loading plot
fig, ax = plt.subplots()
b = ax.barh(range(1, data.shape[1] + 1), pca.components_[0], color='#6699CC')  # first principal component
# b = ax.barh(range(1, data.shape[1] + 1), pca.components_[1], color='#6699CC')  # second principal component
plt.show()
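The bars above are indexed only 1-4; labelling them with the iris feature names makes the plot easier to read. A small sketch reusing the pca object and iris Bunch from the script above:

# Loading plot with feature names on the y axis
fig, ax = plt.subplots()
ax.barh(range(1, data.shape[1] + 1), pca.components_[0], color='#6699CC')
ax.set_yticks(range(1, data.shape[1] + 1))
ax.set_yticklabels(iris.feature_names)
ax.set_xlabel("PC1 loading")
plt.show()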
4. Variable contribution plot
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to zero mean and unit std; StandardScaler works too
# Fix the random seed (no resampling happens in this script)
np.random.seed(0)
pca = PCA()
x_new = pca.fit_transform(data)
# Explained variance ratio of each principal component
explained_variance_ratio = pca.explained_variance_ratio_
# Weight the squared loadings by each component's explained variance
variable_contribution = np.multiply(explained_variance_ratio[:, np.newaxis], pca.components_ ** 2)

def contri(x):
    # Normalise each row so the contributions of all variables sum to 1
    total_ = np.sum(x, axis=1, keepdims=True)
    return x / total_

# Convert to percentages
variable_contribution = contri(variable_contribution) * 100
# Draw the variable contribution plot
fig, ax = plt.subplots()
b = ax.barh(range(1, data.shape[1] + 1), variable_contribution[0, :], color='#6699CC')  # first principal component
# b = ax.barh(range(1, data.shape[1] + 1), variable_contribution[1, :], color='#6699CC')  # second principal component
plt.show()
Note:
The variable contribution plot is really just the squared eigenvector entries, i.e. the projections of the variables onto the principal components that the biplot draws as arrows. When plotting contributions, squaring the eigenvector entries is enough (squaring also guarantees non-negative values). The result above is correct too, because the two computations agree: each row of pca.components_ is a unit vector, so the per-row normalisation in contri() exactly cancels the explained_variance_ratio factor, leaving the squared entries.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to zero mean and unit std; StandardScaler works too
# Fix the random seed (no resampling happens in this script)
np.random.seed(0)
pca = PCA()
x_new = pca.fit_transform(data)
# Explained variance ratio (not needed for the squared-loadings plot; kept for reference)
explained_variance_ratio = pca.explained_variance_ratio_
# Draw the variable contribution plot directly from the squared loadings
fig, ax = plt.subplots()
b = ax.barh(range(1, data.shape[1] + 1), (pca.components_ ** 2)[0, :], color='#6699CC')  # first principal component
# b = ax.barh(range(1, data.shape[1] + 1), (pca.components_ ** 2)[1, :], color='#6699CC')  # second principal component
plt.show()
# pca.components_ ** 2 matches the variable_contribution computed in the first script
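A quick numerical check of this equivalence (assumes the variable_contribution array from the section 4 script and the pca object above are both in scope):

# Each row of pca.components_ has unit norm, so the per-row normalisation in
# contri() cancels the explained_variance_ratio factor exactly.
print(np.allclose(variable_contribution / 100, pca.components_ ** 2))  # True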
5. Appendix
The plotting style follows a paper in Nature Communications.
Reference: Leaf-level coordination principles propagate to the ecosystem scale (https://doi.org/10.1038/s41467-023-39572-5), principal component analysis figure.
R code and data for this figure: PCA biplot and scree plot R code and data.
6. Note (important)
In principal component analysis, variables are often scaled (i.e. standardised). This is especially recommended when the variables are measured on different scales (e.g. kilograms, kilometres, centimetres, ...); otherwise, the PCA output will be seriously distorted.
The goal is to make the variables comparable. Typically, variables are scaled to have a standard deviation of 1 and a mean of 0.
Standardising the data in this way is widely used in gene expression analysis before PCA and clustering. We may also want to scale the data when the means and/or standard deviations of the variables differ greatly.
When scaling, each variable can be transformed as follows:
x_scaled = (x - mean(x)) / sd(x)
where mean(x) is the mean of x and sd(x) is its standard deviation.
Note that R and Origin standardise the data automatically by default when performing PCA, whereas scikit-learn's PCA only centres the data and does not scale it, so in Python we must standardise manually to obtain consistent results.
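To bake the standardisation into the model the way R/Origin do it implicitly, the scaler and the PCA can be chained in a scikit-learn Pipeline; a minimal sketch:

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

# StandardScaler centres and scales; PCA then runs on the standardised data
pipe = make_pipeline(StandardScaler(), PCA())
scores = pipe.fit_transform(X)  # X is the raw, unscaled data matrix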