How to count the most common words in an English document with Python 3 (with explanations)

# coding: utf-8

# In[32]:

# (Optional) fetch the text straight from a web page instead of a local file:
#import requests
#from bs4 import BeautifulSoup
#res = requests.get("http://www.guancha.cn/america/2017_01_21_390488_s.shtml")
#res.encoding = 'utf-8'
#soup = BeautifulSoup(res.text,'lxml')


# In[66]:

with open("speech.txt", 'r', encoding='utf-8') as f:   # you'll need an English text file, of course
    speech_new = f.read()
speech = speech_new.lower().split()      # lower() folds all uppercase to lowercase; split() breaks the string up, on whitespace by default
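Note that split() only breaks on whitespace, so punctuation stays glued to the words ("america," and "america" end up counted separately). If you want cleaner tokens, here is a minimal optional sketch using the re module; it is an extra step, not required for the rest:

import re
speech = re.findall(r"[a-z']+", speech_new.lower())   # keep runs of letters/apostrophes, drop punctuation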


# In[70]:

dic = {}
for i in speech:
    if i not in dic:    # if the word isn't in the dic dictionary yet
        dic[i] = 1      # add it with an initial count of 1
    else:
        dic[i] = dic[i] + 1    # otherwise bump its count by 1
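As an aside, the same counting loop is often written with dict.get(), which returns a default when the key is missing; a minimal equivalent sketch:

dic = {}
for i in speech:
    dic[i] = dic.get(i, 0) + 1   # get(i, 0) returns the current count, or 0 for a word we haven't seen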


# In[68]:

import operator
sorted_words = sorted(dic.items(), key=operator.itemgetter(1), reverse=True)
# dic.items() yields (word, count) tuples;
# key=operator.itemgetter(1) picks what to sort by: index 0 is the word, index 1 is the count, so we pass 1;
# reverse=True sorts from largest to smallest.
# (Don't call this variable `list`: that shadows the built-in list type.)
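operator.itemgetter(1) is interchangeable with a lambda here; a quick check on a toy dict:

toy = {'the': 5, 'people': 3, 'america': 4}   # made-up counts, just for illustration
sorted(toy.items(), key=lambda kv: kv[1], reverse=True)
# -> [('the', 5), ('america', 4), ('people', 3)]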


# In[94]:

from nltk.corpus import stopwords   # NLTK, the natural language toolkit
stop_words = stopwords.words('english')   # fetch the English stop-word list (the corpus name is lowercase 'english')
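If the stop-word corpus hasn't been downloaded to your machine yet, stopwords.words() raises a LookupError; a one-time download fixes it:

import nltk
nltk.download('stopwords')   # one-time download of the NLTK stop-word lists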


# In[103]:

for k, v in sorted_words:   # unpack each (word, count) tuple: index 0 goes to k, index 1 to v
    if k not in stop_words:
        print(k, v)
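One small performance note: stop_words is a plain list, so every `not in` test scans it front to back; turning it into a set once makes each lookup constant-time. A minimal sketch (stop_set is just a name picked for illustration):

stop_set = set(stop_words)   # set membership tests are O(1)
for k, v in sorted_words:
    if k not in stop_set:
        print(k, v)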

But Python 3's standard library ships with something seriously powerful for exactly this:

# In[108]:

from collections import Counter   # a counting data structure, added in Python 2.7 / 3.1
c = Counter(speech)
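Counter behaves like a dict whose values are counts, and it can consume any iterable; a quick sketch on toy input:

from collections import Counter
Counter("to be or not to be".split())
# -> Counter({'to': 2, 'be': 2, 'or': 1, 'not': 1})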


# In[111]:

c.most_common(10)


# In[113]:

for sw in stop_words:
    del c[sw]   # delete the stop words from the counter
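Note that unlike a plain dict, a Counter does not raise KeyError when you delete a key it doesn't hold, so this loop is safe even for stop words that never appear in the speech:

del c['some-word-not-in-the-text']   # hypothetical key: a no-op on a Counter, a KeyError on a plain dict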


# In[114]:

c.most_common(10)

And that's all it takes to get the counts.
