Python study notes: multiprocessing

multiprocessing

from multiprocessing import Process
import time

def f(name):
    time.sleep(2)
    print('hello', name)

if __name__ == '__main__':
    # both children run f() concurrently
    p1 = Process(target=f, args=('bob',))
    p2 = Process(target=f, args=('john',))
    p1.start()
    p2.start()
    p1.join()  # wait for both children to finish
    p2.join()

 

Displaying process IDs

#!/usr/bin/env python
# -*- coding:utf-8 -*-

from multiprocessing import Process
import os

def info(title):
    print(title)
    print('module name:', __name__)
    print('parent process:', os.getppid())  # parent process ID
    print('process id:', os.getpid())       # this process's own ID
    print("\n\n")

def f(name):
    info('\033[31;1mfunction f\033[0m')
    print('hello', name)

if __name__ == '__main__':
    info('\033[32;1mmain process line\033[0m')
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

 

Inter-process communication

Memory is not shared between different processes. To exchange data between two processes, you can use the following mechanisms:

Queues

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from multiprocessing import Process, Queue

def f(q):
    q.put([42, None, 'hello'])

if __name__ == '__main__':
    q = Queue()  # create the queue; it is passed to both children
    p = Process(target=f, args=(q,))
    p2 = Process(target=f, args=(q,))
    p.start()
    p2.start()
    print('from parent', q.get())  # get the values put by the child processes
    print('from parent', q.get())
    p.join()
    p2.join()

 

Pipes

The Pipe() function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). 

#!/usr/bin/env python
# -*- coding:utf-8 -*-

from multiprocessing import Process, Pipe

def f(conn):
    conn.send([42, None, 'hello'])  # send data through the pipe
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=f, args=(child_conn,))
    p2 = Process(target=f, args=(child_conn,))
    p.start()
    p2.start()
    print(parent_conn.recv())  # receive data sent by the children
    print(parent_conn.recv())
    p.join()
    p2.join()
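
Since the connection is duplex, the parent can also send data back through the same pipe. Below is a minimal sketch of two-way communication (the worker function ping is illustrative, not part of the original example):

from multiprocessing import Process, Pipe

def ping(conn):
    msg = conn.recv()              # child waits for a message from the parent
    conn.send('pong: ' + msg)      # and replies on the same connection
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()   # duplex by default
    p = Process(target=ping, args=(child_conn,))
    p.start()
    parent_conn.send('ping')           # parent -> child
    print(parent_conn.recv())          # child -> parent: 'pong: ping'
    p.join()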

 

Managers

A manager object returned by Manager() controls a server process which holds Python objects and allows other processes to manipulate them using proxies.

A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Barrier, Queue, Value and Array.

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from multiprocessing import Process, Manager

def f(d, l):
    # each child mutates the shared dict and list through manager proxies
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.append(1)
    print(l)

if __name__ == '__main__':
    with Manager() as manager:
        d = manager.dict()
        l = manager.list(range(5))
        p_list = []
        for i in range(10):
            p = Process(target=f, args=(d, l))
            p.start()
            p_list.append(p)
        for res in p_list:
            res.join()

        print(d)
        print(l)
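
The other proxied types work in the same way. Here is a minimal sketch using Namespace, Value and a manager Lock (the names g, ns and counter are illustrative, not from the original post):

from multiprocessing import Process, Manager

def g(ns, counter, lock):
    ns.greeting = 'hello'        # set an attribute on the shared Namespace
    with lock:                   # a Value proxy update is not atomic, so guard it
        counter.value += 1

if __name__ == '__main__':
    with Manager() as manager:
        ns = manager.Namespace()
        counter = manager.Value('i', 0)   # shared integer
        lock = manager.Lock()
        p_list = [Process(target=g, args=(ns, counter, lock)) for _ in range(3)]
        for p in p_list:
            p.start()
        for p in p_list:
            p.join()
        print(ns.greeting, counter.value)  # hello 3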

 

进程同步

Without using the lock output from the different processes is liable to get all mixed up.

from multiprocessing import Process, Lock
 
def f(l, i):
    l.acquire()
    try:
        print('hello world', i)
    finally:
        l.release()
 
if __name__ == '__main__':
    lock = Lock()
 
    for num in range(10):
        Process(target=f, args=(lock, num)).start()
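
Lock also supports the context-manager protocol, so the acquire/try/finally/release pattern in f can be written more compactly; the following sketch is equivalent to the example above:

from multiprocessing import Process, Lock

def f(l, i):
    with l:                      # acquire() on entry, release() on exit
        print('hello world', i)

if __name__ == '__main__':
    lock = Lock()
    for num in range(10):
        Process(target=f, args=(lock, num)).start()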

 

Process pool

A process pool maintains an internal collection of worker processes. When a task is submitted, a process is taken from the pool; if there is no available process in the pool, the program waits until one becomes available.

The process pool provides two submission methods (compared in the sketch after the example below):

  • apply: blocking; the parent waits for each task to finish before submitting the next
  • apply_async: non-blocking; tasks run in parallel and a callback can be registered for the results
from multiprocessing import Pool, freeze_support
import time

def Foo(i):
    time.sleep(4)
    print('exec...')
    return i+100

def Bar(arg):
    print('-->exec done:',arg)


if __name__ == '__main__':
    freeze_support()
    pool = Pool(5)
    for i in range(100):
        pool.apply_async(func=Foo, args=(i,), callback=Bar)  # non-blocking; Bar runs in the parent when Foo returns
        # pool.apply(func=Foo, args=(i,))  # blocking alternative
    print('end')
    pool.close()
    pool.join()  # wait for the workers in the pool to finish before exiting; if omitted, the program exits immediately
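
For comparison, here is a minimal sketch of the two submission styles (the square function is illustrative, not from the original post): apply blocks until each result is ready, while apply_async returns an AsyncResult whose get() retrieves the value later.

from multiprocessing import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    with Pool(3) as pool:
        # apply: blocks, so tasks effectively run one after another
        print(pool.apply(square, (2,)))            # 4

        # apply_async: returns immediately; results are collected afterwards
        results = [pool.apply_async(square, (i,)) for i in range(5)]
        print([r.get() for r in results])          # [0, 1, 4, 9, 16]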

 
