Python Exercise Book

 

01: Add a red number to the top-right corner of your QQ (or Weibo) avatar, similar to the WeChat unread-message badge.

[Image Processing]

The target effect is the familiar unread-count badge: a red number overlaid on the top-right corner of the avatar (screenshot omitted).

Python 2.7 code:

from PIL import Image, ImageDraw, ImageFont

def add_word(img):
    char_size = 30
    fillcolor = "#ff0000"
    draw = ImageDraw.Draw(img)
    my_font = ImageFont.truetype(r'C:\Windows\Fonts\SIMYOU.TTF', char_size)  # load a font file from the local system
    width, height = img.size
    draw.text((width - char_size, char_size - 20), '1', font=my_font, fill=fillcolor)  # draw the digit near the top-right corner
    img.save('result.jpg', 'JPEG')
    del draw

if __name__ == "__main__":
    img = Image.open('test.jpg')
    add_word(img)
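For a closer match to the WeChat badge look, the digit can be drawn on top of a filled red circle. A minimal sketch under the same assumptions as above (Pillow installed; the Windows font path and the file names test.jpg / badge.jpg are placeholders):

from PIL import Image, ImageDraw, ImageFont

def add_badge(img, text='1', radius=20):
    draw = ImageDraw.Draw(img)
    width, _ = img.size
    # filled red circle in the top-right corner
    draw.ellipse((width - 2 * radius, 0, width, 2 * radius), fill="#ff0000")
    # white digit, roughly centred inside the circle
    my_font = ImageFont.truetype(r'C:\Windows\Fonts\SIMYOU.TTF', radius)
    draw.text((width - 2 * radius + radius // 2, radius // 2), text, font=my_font, fill="#ffffff")
    return img

if __name__ == "__main__":
    add_badge(Image.open('test.jpg')).save('badge.jpg', 'JPEG')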

 

 

More:

draw.line((0, 0) + im.size, fill=128)  # draw a line across the image
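For context, a minimal self-contained sketch of how that call is used (the file names are placeholders):

from PIL import Image, ImageDraw

im = Image.open('test.jpg')
draw = ImageDraw.Draw(im)
draw.line((0, 0) + im.size, fill=128)  # diagonal line from the top-left to the bottom-right corner
im.save('line_result.jpg', 'JPEG')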

Reference:

Pillow API reference: the official documentation

 


02: Take any plain-text file in English and count how many times each word occurs. [Text Processing]

 

import re

def statis_words(article):
    re_pat = re.compile(r"\W", re.S)       # turn every non-word character into a space
    pre_article = re.sub(re_pat, " ", article)
    re_pat2 = re.compile(r" +", re.S)      # split on runs of spaces
    list_words = re_pat2.split(pre_article)
    dict_re = dict.fromkeys(list_words, 0)
    for i in list_words:
        dict_re[i] += 1
    dict_re.pop("", None)                  # drop the empty token left by leading/trailing spaces
    for i in dict_re.iteritems():          # print each (word, count) pair
        print i

if __name__ == "__main__":
    file_path = "words.txt"
    article = ""
    with open(file_path) as f:
        for i in f.readlines():
            article += i
    statis_words(article.replace("\n", ' '))
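The same word count can be written more compactly with collections.Counter from the standard library. A minimal sketch (Python 2.7, reading the same words.txt):

import re
from collections import Counter

with open("words.txt") as f:
    text = f.read()

# findall extracts the word tokens, Counter tallies them in one pass
word_counts = Counter(re.findall(r"\w+", text))
for word, count in word_counts.most_common():
    print word, count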
    

 

 


03: You have a directory full of photos; resize them so that none exceeds the iPhone 5 resolution. [Image Processing]

 

 

import os
from PIL import Image

iPhone5_WIDTH = 1136
iPhone5_HEIGHT = 640

def resize_iPhone5_pic(path, new_path, width=iPhone5_WIDTH, height=iPhone5_HEIGHT):
    im = Image.open(path)
    w,h = im.size

    if w > width:
        h = width * h // w
        w = width
    if h > height:
        w = height * w // h
        h = height

    im_resized = im.resize((w,h), Image.ANTIALIAS)
    im_resized.save(new_path)


def walk_dir_and_resize(path):
    for root, dirs, files in os.walk(path):  # walk every directory under path recursively
        for f_name in files:
            if f_name.lower().endswith('jpg'):
                path_dst = os.path.join(root,f_name)
                f_new_name = 'iPhone5_' + f_name
                resize_iPhone5_pic(path=path_dst, new_path=f_new_name)

if __name__ == '__main__':
    walk_dir_and_resize('./')  # the current directory

 

Core function: Image.resize()

Image.resize(size, resample=0)

Returns a resized copy of this image.

Parameters:
  • size – The requested size in pixels, as a 2-tuple: (width, height).
  • resample – An optional resampling filter. This can be one of PIL.Image.NEAREST (use nearest neighbour), PIL.Image.BILINEAR (linear interpolation), PIL.Image.BICUBIC (cubic spline interpolation), or PIL.Image.LANCZOS (a high-quality downsampling filter). If omitted, or if the image has mode “1” or “P”, it is set to PIL.Image.NEAREST.
Returns:

An Image object.

 

size: the target image width and height

resample: the resampling filter

PIL.Image.NEAREST: nearest-neighbour interpolation

PIL.Image.BILINEAR: bilinear (linear) interpolation

PIL.Image.BICUBIC: bicubic (cubic spline) interpolation

PIL.Image.LANCZOS: a high-quality Lanczos downsampling filter

Use ANTIALIAS (an alias of LANCZOS in Pillow) when shrinking an image.
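Pillow also provides Image.thumbnail(), which shrinks an image in place while keeping its aspect ratio, so most of task 03 can be done in one call. A minimal sketch (file names are placeholders):

from PIL import Image

im = Image.open('test.jpg')
im.thumbnail((1136, 640), Image.ANTIALIAS)  # shrink in place; aspect ratio preserved, never enlarges
im.save('iPhone5_test.jpg')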

For more image-processing functionality, see OpenCV.

 

Reference: Image Module (official Pillow documentation)

 


04: You have a directory holding a month of diaries, all txt files. To avoid word-segmentation issues, assume the content is English, and report the word you consider most important in each diary. [String Processing] [File Management]

Note: here the most frequent word is taken to be the most important one.

# file management
#coding:utf-8
import os
import re

def analyse_article(article):
    # replace printable ASCII characters that are not letters or digits with spaces
    re_pat = re.compile("(?=[\n\x21-\x7e]+)[^A-Za-z0-9]")
    pre_article = re.sub(re_pat, " ", article)
    # common Chinese punctuation marks, written as GBK byte sequences
    chinese_symbol = ["\xa1\xa3", "\xa1\xb0", "\xa1\xb1", "\xa3\xac", "\xa1\xbe",
                      "\xa1\xbf", "\xa1\xb6", "\xa1\xb7", "\xa3\xba", "\xa3\xbb"]
    for i in chinese_symbol:
        pre_article = pre_article.replace(i, " ")
    re_pat2 = re.compile(" +", re.S)       # split on runs of spaces
    list_words = re_pat2.split(pre_article)
    dict_re = dict.fromkeys(list_words, 0)
    for i in list_words:
        dict_re[i] += 1
    dict_re.pop("", None)                  # drop the empty token left by the split
    key_words = sorted(dict_re.items(), key=lambda e: e[1])[-1]  # the (word, count) pair with the highest count
    return (key_words[0], key_words[1])

def walk_dir_and_analyse(path):
    key_words_list = []
    for root, dirs, files in os.walk(path):  # walk every directory under path recursively
        for f_name in files:
            if f_name.lower().endswith('txt'):
                text = ""                    # reset for each diary file
                with open(os.path.join(root, f_name)) as f:
                    for i in f.readlines():
                        text += i
                key_words_list.append(analyse_article(text))
    for i in key_words_list:
        print "\"" + i[0] + "\" for " + str(i[1]) + " times"

if __name__ == "__main__":
    walk_dir_and_analyse("./")

Output:

>python 4.py
"春眠不觉晓" for 2 times

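For purely English diaries the same result can be reached with re.findall and collections.Counter, without the byte-level punctuation handling. A minimal sketch (Python 2.7; it scans the current directory like the code above):

import os
import re
from collections import Counter

for root, dirs, files in os.walk("./"):
    for name in files:
        if name.lower().endswith("txt"):
            with open(os.path.join(root, name)) as f:
                words = re.findall(r"[A-Za-z]+", f.read().lower())
            if words:
                # most_common(1) returns the single most frequent (word, count) pair
                word, count = Counter(words).most_common(1)[0]
                print "\"" + word + "\" for " + str(count) + " times"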
 

 

 


05: Given a sensitive-word file filtered_words.txt, replace any sensitive word in the user's input with asterisks *. For example, when the user enters 「北京是个好城市」, it becomes 「**是个好城市」.

# [String Processing]
# The sensitive words are stored in filtered_words.txt, one per line;
# when the user's input contains one of them, it is replaced with asterisks.
# Note: mind the actual byte length of the strings, including those read from the txt file
# (a Chinese character occupies more than one byte).
#coding:utf-8

def words_filter(path, words_list):
    content = ""
    with open(path) as f:
        for i in f.readlines():
            for j in words_list:
                if j in i:
                    # one Chinese character is assumed to take two bytes (GBK), so emit one * per character
                    i = i.replace(j, "*" * (len(j) / 2))
            content += i
        return content
         
        
if __name__ == "__main__":
    word_path = "filtered_words.txt"
    path = "words.txt"
    
    words_list = []
    with open(word_path) as f:
        for i in f.readlines():
            words_list.append(i.replace("\n",""))
    print words_filter(path,words_list)    
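A variant that avoids guessing the byte length of a Chinese character is to decode everything to unicode first, so len() counts characters and one * is emitted per character. A minimal sketch, assuming both files are UTF-8 encoded:

#coding:utf-8
import codecs

def load_words(path):
    with codecs.open(path, encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip()]

def filter_text(path, words):
    with codecs.open(path, encoding="utf-8") as f:
        text = f.read()
    for w in words:
        text = text.replace(w, u"*" * len(w))  # len() counts characters on unicode strings
    return text

if __name__ == "__main__":
    print filter_text("words.txt", load_words("filtered_words.txt")).encode("utf-8")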

 
