Clustering analysis of RNA-seq data

library(ConsensusClusterPlus)
library(factoextra)
library(cluster)
library(NbClust)
# Read in the expression data (genes in rows, samples in columns)
data <- read.table("T_405_ex.txt", header = TRUE, row.names = 1)
# Transpose so that samples are rows and genes are columns, and coerce to a matrix
new <- as.matrix(t(data))
is.matrix(new)

# Standardize the data

my_data <- na.omit(new)
my_data <- scale(my_data)
head(my_data, n = 3)

get_clust_tendency(my_data, n = 50,gradient = list(low = "steelblue",  high = "white"))

# n: the number of points sampled from the data space, which also equals the number of points sampled from the given data (it must not exceed the number of samples).

$hopkins_stat
[1] 0.2837771

# If the value of the Hopkins statistic is close to zero (well below 0.5), we can conclude that the dataset is significantly clusterable. (Here the value is below 0.5, suggesting the samples have some tendency to cluster.)

$plot  # (the accompanying plot is not reproduced here)
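As a quick sanity check (a sketch added here, not part of the original workflow), the same call can be run on structureless random data of the same shape; with the factoextra version used here, such data should give a Hopkins statistic near 0.5.

# Sketch: uniform random data within each gene's observed range should show no clustering tendency
set.seed(123)
random_data <- apply(my_data, 2, function(x) runif(length(x), min(x), max(x)))
get_clust_tendency(random_data, n = 50, gradient = list(low = "steelblue", high = "white"))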

# Choose the best number of clusters k

library("NbClust")
res.nbclust <- NbClust(my_data, distance = "euclidean", min.nc = 2, max.nc = 10,  method = "complete", index ="all")

*** : The Hubert index is a graphical method of determining the number of clusters.
In the plot of Hubert index, we seek a significant knee that corresponds to a
significant increase of the value of the measure i.e the significant peak in Hubert
index second differences plot.

*** : The D index is a graphical method of determining the number of clusters.
In the plot of D index, we seek a significant knee (the significant peak in Dindex
second differences plot) that corresponds to a significant increase of the value of
the measure.

*******************************************************************
* Among all indices:
* 11 proposed 2 as the best number of clusters
* 5 proposed 3 as the best number of clusters
* 3 proposed 4 as the best number of clusters
* 4 proposed 6 as the best number of clusters

***** Conclusion *****

* According to the majority rule, the best number of clusters is 2
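Both the vote tally above and the Hubert knee can also be read directly from the NbClust result object. A minimal sketch, assuming the Best.nc and All.index components described in the NbClust documentation:

# Votes per candidate k (graphical indices such as Hubert and Dindex are recorded as 0)
table(res.nbclust$Best.nc["Number_clusters", ])
# Second differences of the Hubert index over k = 2..10; the largest jump marks the knee
hubert <- res.nbclust$All.index[, "Hubert"]
diff(hubert, differences = 2)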

# Cluster the samples
factoextra::fviz_nbclust(res.nbclust) + theme_minimal()

res.hc <- eclust(my_data, "hclust", k = 2, graph = FALSE)
fviz_dend(res.hc, rect = TRUE, show_labels = FALSE)

fviz_silhouette(res.hc)

res.hc
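The per-sample assignments are stored in res.hc$cluster. The class-labelled expression file used by pamr below (k2_ex.txt) is not built in this post, but the k = 2 labels it needs can be extracted as follows (a sketch):

# Sketch: cluster membership (1 or 2) for each sample from the hierarchical clustering
cluster_labels <- data.frame(sample = names(res.hc$cluster), cluster = res.hc$cluster)
head(cluster_labels)
table(cluster_labels$cluster)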

# Identify signature (marker) genes with pamr

library(pamr)
stad.data <- pamr.from.excel("k2_ex.txt", 417, sample.labels=TRUE)
# If there are missing values, impute them by k-nearest neighbours
stad.data2 <- pamr.knnimpute(stad.data)

model <- pamr.train(stad.data2)
model
#You get a table with 3 columns and 30 rows.
#The rows correspond to threshold values (first column).
#For each threshold you see the number of surviving genes (second column)
#and the number of misclassifications on the training set (third column).
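The same three columns can also be pulled out of the fitted object programmatically; a sketch, assuming the threshold, nonzero and errors components that pamr.train results carry:

# Sketch: thresholds, surviving gene counts and training misclassifications
head(data.frame(threshold = model$threshold, nonzero = model$nonzero, errors = model$errors), 10)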


# Cross-validation
model.cv <- pamr.cv(model, stad.data2, nfold = 10)
model.cv
pamr.plotcv(model.cv)
#Using the results of cross validation, choose a threshold value Delta as a tradeoff
#between a small number of genes and a good generalization accuracy.
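A sketch of inspecting that tradeoff numerically (this assumes the threshold, size and error components that pamr.cv results expose; check str(model.cv) if your pamr version differs):

# Sketch: cross-validated misclassifications and surviving gene counts per threshold
cv.summary <- data.frame(threshold = model.cv$threshold, size = model.cv$size, error = model.cv$error)
cv.summary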

Delta = 11
pamr.plotcen(model, stad.data2, Delta)
dev.print(file = "MYcentroids_k2.ps")
dev.print(device = pdf, file = "MYcentroids_k2.pdf")
#The next function prints a 2 × 2 confusion table,
#which tells us how many samples in each class were predicted correctly.
pamr.confusion(model.cv, Delta)
#To get a visual impression of how clearly the two classes are separated by PAM
pamr.plotcvprob(model, stad.data2, Delta)
#The following command plots for each gene surviving the threshold a figure showing the expression levels of this gene over the whole set of samples.
#You will see which genes are up- or downregulated and how variable they are.
pamr.geneplot(model, stad.data2, Delta)

# Export the signature genes

result <- pamr.listgenes(model, stad.data2, Delta, genenames = TRUE)
write.table(result, file = "k2_result_gene.txt")