https://www.jianshu.com/p/e8b26dc7884a
package main

import (
    "fmt"
    "math/rand"
    "sync"
    "time"
)

// SimpleData is the unit of work processed by the examples below.
type SimpleData struct {
    ID int
}

// Work processes all data sequentially, one item at a time.
func Work(allData []SimpleData) {
    start := time.Now()
    for i := range allData {
        Process(allData[i])
    }
    fmt.Printf("Took ===============> %s\n", time.Since(start))
}

// Process simulates a unit of work that takes about 100ms.
func Process(data SimpleData) {
    fmt.Printf("Start processing %d\n", data.ID)
    time.Sleep(100 * time.Millisecond)
    fmt.Printf("Finish processing %d\n", data.ID)
}
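// Note: with 1000 items and roughly 100ms per Process call, the sequential
// Work loop above needs on the order of 100 seconds; this is the baseline
// the concurrent variants below are measured against.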
// NotPooledWork processes the data concurrently without a worker pool.
func NotPooledWork(allData []SimpleData) {
    start := time.Now()
    var wg sync.WaitGroup

    dataCh := make(chan SimpleData, 10)

    wg.Add(1)
    go func() {
        defer wg.Done()
        for data := range dataCh {
            wg.Add(1)
            go func(data SimpleData) {
                defer wg.Done()
                Process(data)
            }(data)
        }
    }()

    for i := range allData {
        dataCh <- allData[i]
    }
    close(dataCh)
    wg.Wait()

    fmt.Printf("Took ===============> %s\n", time.Since(start))
}
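// Note: the consumer above launches a fresh goroutine for every item read
// from dataCh, so concurrency is unbounded: the number of live goroutines
// grows with the input size, which is what the pooled variants below avoid.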
// PooledWork processes the data with a fixed pool of 10 worker goroutines
// that all consume from the same channel.
func PooledWork(allData []SimpleData) {
    start := time.Now()
    var wg sync.WaitGroup
    workerPoolSize := 10

    dataCh := make(chan SimpleData, workerPoolSize)

    for i := 0; i < workerPoolSize; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for data := range dataCh {
                Process(data)
            }
        }()
    }

    for i := range allData {
        dataCh <- allData[i]
    }
    // Closing the channel ends each worker's range loop once it is drained.
    close(dataCh)
    wg.Wait()

    fmt.Printf("Took ===============> %s\n", time.Since(start))
}
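// Note: with workerPoolSize = 10 and each Process call sleeping ~100ms, the
// 1000 items should take roughly (1000 / 10) * 100ms ≈ 10 seconds here,
// compared to ~100 seconds for the sequential Work version.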
// PooledWorkError is like PooledWork but collects errors on a separate
// buffered channel and drains them in a dedicated goroutine.
func PooledWorkError(allData []SimpleData) {
    start := time.Now()
    var wg sync.WaitGroup
    workerPoolSize := 10

    dataCh := make(chan SimpleData, workerPoolSize)
    errors := make(chan error, 1000)

    for i := 0; i < workerPoolSize; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for data := range dataCh {
                ProcessError(data, errors)
            }
        }()
    }

    for i := range allData {
        dataCh <- allData[i]
    }
    close(dataCh)

    // Drain the errors until no new error has arrived for one second.
    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            select {
            case err := <-errors:
                fmt.Println("finished with error:", err.Error())
            case <-time.After(time.Second * 1):
                fmt.Println("Timeout: errors finished")
                return
            }
        }
    }()

    defer close(errors)
    wg.Wait()

    fmt.Printf("Took ===============> %s\n", time.Since(start))
}

// ProcessError simulates work that fails when the ID is divisible by 29 and
// reports the failure on the errors channel instead of printing a result.
func ProcessError(data SimpleData, errors chan<- error) {
    fmt.Printf("Start processing %d\n", data.ID)
    time.Sleep(100 * time.Millisecond)
    if data.ID%29 == 0 {
        errors <- fmt.Errorf("error on job %v", data.ID)
    } else {
        fmt.Printf("Finish processing %d\n", data.ID)
    }
}
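// Note: the errors channel is buffered to 1000, enough for one error per
// item, so a worker never blocks while reporting a failure. The collector
// goroutine treats one second without a new error (the time.After case) as
// the signal that the error stream has dried up, and then exits.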
// Task bundles a payload with the function that processes it; the error
// returned by that function is stored on the task itself.
type Task struct {
    Err  error
    Data interface{}
    f    func(interface{}) error
}

// NewTask creates a Task from a processing function and its payload.
func NewTask(f func(interface{}) error, data interface{}) *Task {
    return &Task{f: f, Data: data}
}

// process runs the task's function and records any error on the task.
func process(workerID int, task *Task) {
    fmt.Printf("Worker %d processes task %v\n", workerID, task.Data)
    task.Err = task.f(task.Data)
}
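// For example, a Task that just prints its payload could be built as:
//
//     t := NewTask(func(data interface{}) error {
//         fmt.Println("processing", data)
//         return nil
//     }, 42)
//
// After the pool has run it, t.Err holds whatever the function returned.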
// Worker pulls tasks from a shared task channel and processes them.
type Worker struct {
    ID       int
    taskChan chan *Task
    quit     chan bool
}

// NewWorker returns a new Worker reading from the given task channel.
func NewWorker(channel chan *Task, ID int) *Worker {
    return &Worker{
        ID:       ID,
        taskChan: channel,
        quit:     make(chan bool),
    }
}

// Start runs the worker until the task channel is closed.
func (wr *Worker) Start(wg *sync.WaitGroup) {
    fmt.Printf("Starting worker %d\n", wr.ID)

    wg.Add(1)
    go func() {
        defer wg.Done()
        for task := range wr.taskChan {
            process(wr.ID, task)
        }
    }()
}

// StartBackground runs the worker until Stop is called, so the task channel
// can stay open and keep receiving new tasks.
func (wr *Worker) StartBackground() {
    fmt.Printf("Starting worker %d\n", wr.ID)

    for {
        select {
        case task := <-wr.taskChan:
            process(wr.ID, task)
        case <-wr.quit:
            return
        }
    }
}

// Stop signals a background worker to exit.
func (wr *Worker) Stop() {
    fmt.Printf("Closing worker %d\n", wr.ID)
    go func() {
        wr.quit <- true
    }()
}
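// Note: Pool.Run uses Start (workers exit when the collector channel is
// closed), while Pool.RunBackground uses StartBackground (workers exit only
// when told to via quit). Stop sends on quit from a separate goroutine so it
// returns immediately even if the worker is still busy with a task.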
// Pool is the worker pool: it owns the workers and the channel ("collector")
// that feeds tasks to them.
type Pool struct {
    Tasks   []*Task
    Workers []*Worker

    concurrency   int
    collector     chan *Task
    runBackground chan bool
    wg            sync.WaitGroup
}

// NewPool initializes a new pool with the given tasks and the given concurrency.
func NewPool(tasks []*Task, concurrency int) *Pool {
    return &Pool{
        Tasks:       tasks,
        concurrency: concurrency,
        collector:   make(chan *Task, 1000),
    }
}

// Run runs all work within the pool and blocks until it's finished.
func (p *Pool) Run() {
    for i := 1; i <= p.concurrency; i++ {
        worker := NewWorker(p.collector, i)
        worker.Start(&p.wg)
    }

    for i := range p.Tasks {
        p.collector <- p.Tasks[i]
    }
    // Close the collector so the workers' range loops end, then wait for them.
    close(p.collector)
    p.wg.Wait()
}

// AddTask adds a task to the pool's collector channel.
func (p *Pool) AddTask(task *Task) {
    p.collector <- task
}

// RunBackground starts the workers in background mode and blocks until Stop is called.
func (p *Pool) RunBackground() {
    go func() {
        for {
            fmt.Print("⌛ Waiting for tasks to come in ...\n")
            time.Sleep(10 * time.Second)
        }
    }()

    for i := 1; i <= p.concurrency; i++ {
        worker := NewWorker(p.collector, i)
        p.Workers = append(p.Workers, worker)
        go worker.StartBackground()
    }

    for i := range p.Tasks {
        p.collector <- p.Tasks[i]
    }

    // Block here until Stop sends on runBackground.
    p.runBackground = make(chan bool)
    <-p.runBackground
}

// Stop stops the background workers and unblocks RunBackground.
func (p *Pool) Stop() {
    for i := range p.Workers {
        p.Workers[i].Stop()
    }
    p.runBackground <- true
}
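// Note: Stop signals each Worker over its quit channel and then sends on
// runBackground to unblock RunBackground. Since runBackground is only created
// near the end of RunBackground, Stop assumes the pool is already running in
// the background; calling it earlier would send on a nil channel and block.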
func main() {
    // Generate the sample data.
    var allData []SimpleData
    for i := 0; i < 1000; i++ {
        data := SimpleData{ID: i}
        allData = append(allData, data)
    }

    // Generate the initial tasks for the pool.
    var allTask []*Task
    for i := 1; i <= 100; i++ {
        task := NewTask(func(data interface{}) error {
            taskID := data.(int)
            time.Sleep(100 * time.Millisecond)
            fmt.Printf("Task %d processed\n", taskID)
            return nil
        }, i)
        allTask = append(allTask, task)
    }

    pool := NewPool(allTask, 5)

    // Run synchronously:
    // Work(allData)

    // Run without any pooling:
    // NotPooledWork(allData)

    // Run with pooling:
    // PooledWork(allData)

    // Run with pooling that handles errors:
    // PooledWorkError(allData)

    // Run the robust worker pool:
    // pool.Run()

    // Run the robust worker pool in the background, feeding it tasks
    // (and occasionally stopping it) from another goroutine.
    go func() {
        for {
            taskID := rand.Intn(100) + 20

            if taskID%7 == 0 {
                pool.Stop()
            }

            time.Sleep(time.Duration(rand.Intn(5)) * time.Second)

            task := NewTask(func(data interface{}) error {
                taskID := data.(int)
                time.Sleep(100 * time.Millisecond)
                fmt.Printf("Task %d processed\n", taskID)
                return nil
            }, taskID)
            pool.AddTask(task)
        }
    }()
    pool.RunBackground()
}