Basic Usage of Go Concurrency

  1. Basic usage
package main

import (
	"fmt"
	"sync"
)

var wg sync.WaitGroup

func hello() {
	fmt.Println("hello func...")
	wg.Done() // decrement the WaitGroup counter by 1
}

func main() {
	wg.Add(4) // counter: 4 concurrent tasks
	go hello()
	go hello()
	go hello()
	go hello()
	fmt.Println("main func!")

	wg.Wait() // block until all tasks have finished
}
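In practice, Done is usually deferred so the counter is decremented even if the function panics or returns early. A minimal variant of the same program using defer:

package main

import (
	"fmt"
	"sync"
)

var wg sync.WaitGroup

func hello() {
	defer wg.Done() // runs even if hello panics or returns early
	fmt.Println("hello func...")
}

func main() {
	wg.Add(4)
	for i := 0; i < 4; i++ {
		go hello()
	}
	wg.Wait()
}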
  2. A variation: launching 10,000 goroutines
package main

import (
	"fmt"
	"sync"
)

var wg sync.WaitGroup

func hello(i int) {
	fmt.Println("hello func...", i)
	wg.Done()
}

func main() {
	// wg.Add(10000) // alternatively, add all 10000 to the counter up front
	for i := 0; i < 10000; i++ {
		wg.Add(1)
		go hello(i)
	}
	fmt.Println("main func!")

	wg.Wait()
}
  3. The previous example rewritten with an anonymous function
package main

import (
	"fmt"
	"sync"
)

var wg sync.WaitGroup

func main() {
	for i := 0; i < 10000; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done() // without Add/Done, Wait would return before the goroutines run
			fmt.Println("hello...", i)
		}(i)
	}
	fmt.Println("main func!")

	wg.Wait()
}
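Note that i is passed to the closure as an argument. Before Go 1.22, every iteration shared a single loop variable, so a closure that captured i directly would mostly print late, duplicated values. A minimal sketch of that pitfall (assuming a pre-1.22 toolchain):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Pre-Go 1.22, this closure captures the shared loop variable i,
			// so it often prints 3 three times instead of 0, 1, 2.
			fmt.Println("captured:", i)
		}()
	}
	wg.Wait()
}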
  4. Limiting the number of CPU cores used
package main

import (
	"fmt"
	"runtime"
	"sync"
)

var wg sync.WaitGroup

func test1() {
	defer wg.Done() // without this, wg.Wait() would block forever
	for i := 0; i < 10; i++ {
		fmt.Println("func test1...", i)
	}
}

func test2() {
	defer wg.Done()
	for i := 0; i < 10; i++ {
		fmt.Println("func test2...", i)
	}
}

func main() {
	runtime.GOMAXPROCS(1) // use only 1 CPU core
	wg.Add(2)
	go test1()
	go test2()
	wg.Wait()
}
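Since Go 1.5, GOMAXPROCS defaults to the number of logical CPUs, so setting it is rarely necessary. Passing a value below 1 leaves the setting unchanged and simply returns the current value, which makes for a read-only query; a small sketch:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	fmt.Println("logical CPUs:", runtime.NumCPU())
	// GOMAXPROCS(0) does not change the setting; it just reports it.
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
}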
  5. Buffered channels: similar to an asynchronous operation
package main

import "fmt"

func main() {
	ch1 := make(chan int, 1) // buffered channel that can hold one value
	ch1 <- 10                // send
	x := <-ch1               // receive
	fmt.Println(x)
	close(ch1)
}
  6. Unbuffered channels, also known as synchronous channels
package main

import "fmt"

func main() {
	ch1 := make(chan int) // unbuffered channel, also called a synchronous channel
	// A send on an unbuffered channel blocks until another goroutine receives,
	// so sending from main with no receiver running would deadlock.
	go func() {
		ch1 <- 10
	}()
	x := <-ch1 // main receives, which lets the send complete
	fmt.Println(x)
	close(ch1)
}
  7. Getting a channel's capacity and the number of elements in it
package main

import "fmt"

func main() {
	ch1 := make(chan int, 10)
	ch1 <- 89
	ch1 <- 70
	fmt.Println(len(ch1)) // number of elements currently in the channel
	fmt.Println(cap(ch1)) // capacity of the channel
	close(ch1)
}
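Closing a buffered channel does not discard the values already queued in it; receives keep succeeding until the buffer is empty. A minimal sketch draining a closed channel with range:

package main

import "fmt"

func main() {
	ch := make(chan int, 10)
	ch <- 89
	ch <- 70
	close(ch) // buffered values survive the close

	// range keeps receiving until the channel is closed and empty.
	for v := range ch {
		fmt.Println("drained:", v)
	}
}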
  8. A small example combining channels and goroutines
package main

import (
	"fmt"
	"sync"
)

type myinterface interface{}

var ch1 = make(chan myinterface, 1)
var wg sync.WaitGroup

func sendData(i myinterface) {
	ch1 <- i
	fmt.Printf("sent %v to the channel\n", i) // print only after the send has completed
	wg.Done()
}

func readData() {
	v := <-ch1
	fmt.Println("value received from the channel:", v)
	wg.Done()
}

func main() {
	nameArray := []string{"ttr", "tantianran"}

	wg.Add(2)
	go sendData(nameArray)
	go readData()
	wg.Wait()
}
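Because ch1 has a buffer of one, sendData can finish even if readData has not started receiving yet. With an unbuffered channel the two goroutines must rendezvous; a minimal sketch of that variant:

package main

import (
	"fmt"
	"sync"
)

var ch = make(chan interface{}) // unbuffered: send and receive must meet
var wg sync.WaitGroup

func sendData(v interface{}) {
	defer wg.Done()
	ch <- v // blocks until readData is ready to receive
	fmt.Printf("sent %v to the channel\n", v)
}

func readData() {
	defer wg.Done()
	fmt.Println("value received from the channel:", <-ch)
}

func main() {
	wg.Add(2)
	go sendData([]string{"ttr", "tantianran"})
	go readData()
	wg.Wait()
}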
  9. Channels + goroutines working together, example 2
package main

import "fmt"

func producer(ch chan int) {
	for i := 0; i < 10; i++ {
		ch <- i
	}
	close(ch) // closing ch tells the consumer no more values are coming
}

func consumer(ch1 chan int, ch2 chan int) {
	for {
		v, ok := <-ch1
		if !ok { // ok is false once ch1 is closed and drained
			break
		}
		ch2 <- v * 2
	}
	close(ch2) // in turn, closing ch2 ends the range loop in main
}

func main() {
	ch1 := make(chan int, 100)
	ch2 := make(chan int, 200)

	go producer(ch1)
	go consumer(ch1, ch2)

	for i := range ch2 {
		fmt.Println(i)
	}
}
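This pipeline works because each stage closes the channel it owns: producer closes ch1, which ends consumer's loop, and consumer closes ch2, which ends the range in main. With several consumers, ch2 may only be closed after all of them finish, which takes a sync.WaitGroup. A minimal sketch with two workers:

package main

import (
	"fmt"
	"sync"
)

func main() {
	ch1 := make(chan int, 100)
	ch2 := make(chan int, 200)

	// Producer: feed ch1, then close it so the workers' range loops end.
	go func() {
		for i := 0; i < 10; i++ {
			ch1 <- i
		}
		close(ch1)
	}()

	// Fan out: two workers drain ch1 concurrently.
	var wg sync.WaitGroup
	for w := 0; w < 2; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for v := range ch1 {
				ch2 <- v * 2
			}
		}()
	}

	// Close ch2 only after every worker has finished sending.
	go func() {
		wg.Wait()
		close(ch2)
	}()

	for v := range ch2 {
		fmt.Println(v)
	}
}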




// Copyright © 2021 Alibaba Group Holding Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package applydrivers

import (
	"fmt"
	"os"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/version"

	"github.com/labring/sealos/pkg/apply/processor"
	"github.com/labring/sealos/pkg/client-go/kubernetes"
	"github.com/labring/sealos/pkg/clusterfile"
	"github.com/labring/sealos/pkg/constants"
	v2 "github.com/labring/sealos/pkg/types/v1beta1"
	"github.com/labring/sealos/pkg/utils/iputils"
	"github.com/labring/sealos/pkg/utils/logger"
	"github.com/labring/sealos/pkg/utils/yaml"
)

// NewDefaultApplier builds an Applier for the given cluster, loading the
// Clusterfile from its default path when none is supplied.
func NewDefaultApplier(cluster *v2.Cluster, cf clusterfile.Interface, images []string) (Interface, error) {
	if cluster.Name == "" {
		return nil, fmt.Errorf("cluster name cannot be empty")
	}
	if cf == nil {
		cf = clusterfile.NewClusterFile(constants.Clusterfile(cluster.Name))
	}
	err := cf.Process()
	if !cluster.CreationTimestamp.IsZero() && err != nil {
		return nil, err
	}

	return &Applier{
		ClusterDesired: cluster,
		ClusterFile:    cf,
		ClusterCurrent: cf.GetCluster(),
		RunNewImages:   images,
	}, nil
}

func NewDefaultScaleApplier(current, cluster *v2.Cluster) (Interface, error) {
	if cluster.Name == "" {
		cluster.Name = current.Name
	}
	cFile := clusterfile.NewClusterFile(constants.Clusterfile(cluster.Name))
	return &Applier{
		ClusterDesired: cluster,
		ClusterFile:    cFile,
		ClusterCurrent: current,
	}, nil
}

type Applier struct {
	ClusterDesired     *v2.Cluster
	ClusterCurrent     *v2.Cluster
	ClusterFile        clusterfile.Interface
	Client             kubernetes.Client
	CurrentClusterInfo *version.Info
	RunNewImages       []string
}

// Apply creates the cluster when it does not exist yet, otherwise reconciles
// it toward the desired state; either way the Clusterfile is written back to
// local storage on return.
func (c *Applier) Apply() error {
	clusterPath := constants.Clusterfile(c.ClusterDesired.Name)
	var err error
	defer func() {
		logger.Debug("write cluster file to local storage: %s", clusterPath)
		saveerror := yaml.MarshalYamlToFile(clusterPath, c.getWriteBackObjects()...)
		if err == nil {
			err = saveerror
		}
	}()
	c.initStatus()
	if c.ClusterDesired.CreationTimestamp.IsZero() && (c.ClusterCurrent == nil || c.ClusterCurrent.CreationTimestamp.IsZero()) {
		err = c.initCluster()
		c.ClusterDesired.CreationTimestamp = metav1.Now()
	} else {
		err = c.reconcileCluster()
		c.ClusterDesired.CreationTimestamp = c.ClusterCurrent.CreationTimestamp
	}
	c.updateStatus(err)
	return err
}

func (c *Applier) getWriteBackObjects() []interface{} {
	obj := []interface{}{c.ClusterDesired}
	if configs := c.ClusterFile.GetConfigs(); len(configs) > 0 {
		for i := range configs {
			obj = append(obj, configs[i])
		}
	}
	return obj
}

func (c *Applier) initStatus() {
	c.ClusterDesired.Status.Phase = v2.ClusterInProcess
	c.ClusterDesired.Status.Conditions = make([]v2.ClusterCondition, 0)
}

// todo: atomic updating status after each installation for better reconcile?
// todo: set up signal handler
func (c *Applier) updateStatus(err error) {
	condition := v2.ClusterCondition{
		Type:              "ApplyClusterSuccess",
		Status:            v1.ConditionTrue,
		LastHeartbeatTime: metav1.Now(),
		Reason:            "Ready",
		Message:           "Applied to cluster successfully",
	}
	c.ClusterDesired.Status.Phase = v2.ClusterSuccess
	if err != nil {
		condition.Status = v1.ConditionFalse
		condition.Reason = "ApplyClusterError"
		condition.Message = err.Error()
		logger.Error("Applied to cluster error: %v", err)
		c.ClusterDesired.Status.Phase = v2.ClusterFailed
	}
	c.ClusterDesired.Status.Conditions = v2.UpdateCondition(c.ClusterDesired.Status.Conditions, condition)
}

func (c *Applier) reconcileCluster() error {
	// sync the new version's pki and etc dirs under `.sealos/default/pki` and `.sealos/default/etc`
	processor.SyncNewVersionConfig(c.ClusterDesired.Name)
	if len(c.RunNewImages) != 0 {
		logger.Debug("run new images: %+v", c.RunNewImages)
		if err := c.installApp(c.RunNewImages); err != nil {
			return err
		}
	}
	mj, md := iputils.GetDiffHosts(c.ClusterCurrent.GetMasterIPAndPortList(), c.ClusterDesired.GetMasterIPAndPortList())
	nj, nd := iputils.GetDiffHosts(c.ClusterCurrent.GetNodeIPAndPortList(), c.ClusterDesired.GetNodeIPAndPortList())

	return c.scaleCluster(mj, md, nj, nd)
}

func (c *Applier) initCluster() error {
	logger.Info("Start to create a new cluster: master %s, worker %s, registry %s", c.ClusterDesired.GetMasterIPList(), c.ClusterDesired.GetNodeIPList(), c.ClusterDesired.GetRegistryIP())
	createProcessor, err := processor.NewCreateProcessor(c.ClusterDesired.Name, c.ClusterFile)
	if err != nil {
		return err
	}

	if err = createProcessor.Execute(c.ClusterDesired); err != nil {
		return err
	}

	logger.Info("succeeded in creating a new cluster, enjoy it!")

	return nil
}

func (c *Applier) installApp(images []string) error {
	logger.Info("start to install app in this cluster")
	if err := c.ClusterFile.Process(); err != nil {
		return err
	}
	installProcessor, err := processor.NewInstallProcessor(c.ClusterFile, images)
	if err != nil {
		return err
	}
	return installProcessor.Execute(c.ClusterDesired)
}

func (c *Applier) scaleCluster(mj, md, nj, nd []string) error {
	if len(mj) == 0 && len(md) == 0 && len(nj) == 0 && len(nd) == 0 {
		logger.Info("no nodes that need to be scaled")
		return nil
	}
	logger.Info("start to scale this cluster")
	logger.Debug("current cluster: master %s, worker %s", c.ClusterCurrent.GetMasterIPAndPortList(), c.ClusterCurrent.GetNodeIPAndPortList())
	logger.Debug("desired cluster: master %s, worker %s", c.ClusterDesired.GetMasterIPAndPortList(), c.ClusterDesired.GetNodeIPAndPortList())
	scaleProcessor, err := processor.NewScaleProcessor(c.ClusterFile, c.ClusterDesired.Name, c.ClusterDesired.Spec.Image, mj, md, nj, nd)
	if err != nil {
		return err
	}
	if err = scaleProcessor.Execute(c.ClusterDesired); err != nil {
		return err
	}
	logger.Info("succeeded in scaling this cluster")
	return nil
}

// Delete tears down the cluster and archives the Clusterfile under a
// timestamped name.
func (c *Applier) Delete() error {
	t := metav1.Now()
	c.ClusterDesired.DeletionTimestamp = &t
	defer func() {
		cfPath := constants.Clusterfile(c.ClusterDesired.Name)
		target := fmt.Sprintf("%s.%d", cfPath, t.Unix())
		logger.Debug("write reset cluster file to local: %s", target)
		if err := yaml.MarshalYamlToFile(cfPath, c.getWriteBackObjects()...); err != nil {
			logger.Error("failed to store cluster file: %v", err)
		}
		_ = os.Rename(cfPath, target)
	}()
	return c.deleteCluster()
}

func (c *Applier) deleteCluster() error {
	deleteProcessor, err := processor.NewDeleteProcessor(c.ClusterDesired.Name, c.ClusterFile)
	if err != nil {
		return err
	}

	if err := deleteProcessor.Execute(c.ClusterDesired); err != nil {
		return err
	}

	logger.Info("succeeded in deleting current cluster")
	return nil
}
 