it_worker365

   ::  ::  ::  ::  :: 管理

命令行方式是怎么支持的?

看源码,命令总入口在main.go中,main方法中使用了cmd.NewAPISIXIngressControllerCommand()

package main

import (
    "fmt"
    "os"
    
    "github.com/apache/apisix-ingress-controller/cmd"
)

// main is the process entry point: it builds the root cobra command for
// apisix-ingress-controller and executes it, exiting with status 1 on error.
func main() {
    if err := cmd.NewAPISIXIngressControllerCommand().Execute(); err != nil {
        fmt.Fprintln(os.Stderr, err.Error())
        os.Exit(1)
    }
}

来看看下一步的内容是啥 - cmd/cmd.go(package cmd)里定义了主命令,且加入了ingress等子命令的入口

package cmd

import (
    "fmt"
    "github.com/spf13/cobra"
    "github.com/apache/apisix-ingress-controller/cmd/ingress"
    "github.com/apache/apisix-ingress-controller/pkg/version"
)

// newVersionCommand builds the `version` sub command, which prints the short
// version line by default or the long form when --long is given.
func newVersionCommand() *cobra.Command {
    var long bool
    versionCmd := &cobra.Command{
        Use:   "version",
        Short: "version for apisix-ingress-controller",
        Run: func(_ *cobra.Command, _ []string) {
            if !long {
                fmt.Printf("apisix-ingress-controller version %s\n", version.Short())
                return
            }
            fmt.Print(version.Long())
        },
    }

    versionCmd.PersistentFlags().BoolVar(&long, "long", false, "show long mode version information")
    return versionCmd
}

// NewAPISIXIngressControllerCommand creates the apisix-ingress-controller command.
// It wires the `ingress` and `version` sub commands onto the root command.
func NewAPISIXIngressControllerCommand() *cobra.Command {
    root := &cobra.Command{
        Use:     "apisix-ingress-controller [command]",
        Long:    "Yet another Ingress controller for Kubernetes using Apache APISIX as the high performance reverse proxy.",
        Version: version.Short(),
    }

    root.AddCommand(
        ingress.NewIngressCommand(),
        newVersionCommand(),
    )
    return root
}

下级命令有什么,具体看cmd/ingress/ingress.go里定义了哪些

package ingress

import (
    "encoding/json"
    "fmt"
    "os"
    "os/signal"
    "strings"
    "syscall"
    "time"

    "github.com/spf13/cobra"

    "github.com/apache/apisix-ingress-controller/pkg/config"
    controller "github.com/apache/apisix-ingress-controller/pkg/ingress"
    "github.com/apache/apisix-ingress-controller/pkg/log"
    "github.com/apache/apisix-ingress-controller/pkg/version"
)

// dief writes the formatted message to stderr, guaranteeing a trailing
// newline, then terminates the process with exit code 1.
func dief(template string, args ...interface{}) {
    msg := template
    if !strings.HasSuffix(msg, "\n") {
        msg += "\n"
    }
    fmt.Fprintf(os.Stderr, msg, args...)
    os.Exit(1)
}

// waitForSignal blocks until SIGINT or SIGTERM is delivered, then closes
// stopCh to ask the rest of the program to shut down.
func waitForSignal(stopCh chan struct{}) {
    signals := make(chan os.Signal, 1)
    signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)

    sig := <-signals
    log.Infof("signal %d (%s) received", sig, sig.String())
    close(stopCh)
}

// NewIngressCommand creates the ingress sub command for apisix-ingress-controller.
//
// The command can be driven either by a configuration file (--config-path,
// which takes precedence and makes all other flags ignored) or by the
// individual command line options registered below. It validates the
// configuration, sets up logging, builds the controller and runs it until
// SIGINT/SIGTERM arrives.
func NewIngressCommand() *cobra.Command {
    var configPath string
    cfg := config.NewDefaultConfig()

    cmd := &cobra.Command{
        Use: "ingress [flags]",
        Long: `launch the ingress controller

You can run apisix-ingress-controller from configuration file or command line options,
if you run it from configuration file, other command line options will be ignored.

Run from configuration file:

    apisix-ingress-controller ingress --config-path /path/to/config.json

Both json and yaml are supported as the configuration file format.

Run from command line options:

    apisix-ingress-controller ingress --apisix-base-url http://apisix-service:9180/apisix/admin --kubeconfig /path/to/kubeconfig

For Kubernetes cluster version older than v1.19.0, you should always set the --ingress-version option to networking/v1beta1:

    apisix-ingress-controller ingress \
      --apisix-base-url http://apisix-service:9180/apisix/admin \
      --kubeconfig /path/to/kubeconfig \
      --ingress-version networking/v1beta1

If your Kubernetes cluster version is prior to v1.14+, only ingress.extensions/v1beta1 can be used.

    apisix-ingress-controller ingress \
      --apisix-base-url http://apisix-service:9180/apisix/admin \
      --kubeconfig /path/to/kubeconfig \
      --ingress-version extensions/v1beta1

If you run apisix-ingress-controller outside the Kubernetes cluster, --kubeconfig option (or kubeconfig item in configuration file) should be specified explicitly,
or if you run it inside cluster, leave it alone and in-cluster configuration will be discovered and used.

Before you run apisix-ingress-controller, be sure all related resources, like CRDs (ApisixRoute, ApisixUpstream and etc),
the apisix cluster and others are created`,
        Run: func(cmd *cobra.Command, args []string) {
            // A configuration file, when given, replaces the flag-built
            // configuration entirely.
            if configPath != "" {
                c, err := config.NewConfigFromFile(configPath)
                if err != nil {
                    dief("failed to initialize configuration: %s", err)
                }
                cfg = c
            }
            if err := cfg.Validate(); err != nil {
                dief("bad configuration: %s", err)
            }

            logger, err := log.NewLogger(
                log.WithLogLevel(cfg.LogLevel),
                log.WithOutputFile(cfg.LogOutput),
            )
            if err != nil {
                dief("failed to initialize logging: %s", err)
            }
            log.DefaultLogger = logger
            log.Info("apisix ingress controller started")

            log.Info("version:\n", version.Long())

            data, err := json.MarshalIndent(cfg, "", "\t")
            if err != nil {
                // Bug fix: report the marshalling error itself; the previous
                // code passed string(data), which is empty when Marshal fails.
                dief("failed to show configuration: %s", err)
            }
            log.Info("use configuration\n", string(data))

            stop := make(chan struct{})
            ingress, err := controller.NewController(cfg)
            if err != nil {
                dief("failed to create ingress controller: %s", err)
            }
            // Run the controller in the background; the main goroutine blocks
            // on signal delivery and closes stop to trigger shutdown.
            go func() {
                if err := ingress.Run(stop); err != nil {
                    dief("failed to run ingress controller: %s", err)
                }
            }()

            waitForSignal(stop)
            log.Info("apisix ingress controller exited")
        },
    }

    // Flag registrations mirror the fields of config.Config; a configuration
    // file (--config-path) overrides all of them.
    cmd.PersistentFlags().StringVar(&configPath, "config-path", "", "configuration file path for apisix-ingress-controller")
    cmd.PersistentFlags().StringVar(&cfg.LogLevel, "log-level", "info", "error log level")
    cmd.PersistentFlags().StringVar(&cfg.LogOutput, "log-output", "stderr", "error log output file")
    cmd.PersistentFlags().StringVar(&cfg.HTTPListen, "http-listen", ":8080", "the HTTP Server listen address")
    cmd.PersistentFlags().BoolVar(&cfg.EnableProfiling, "enable-profiling", true, "enable profiling via web interface host:port/debug/pprof")
    cmd.PersistentFlags().StringVar(&cfg.Kubernetes.Kubeconfig, "kubeconfig", "", "Kubernetes configuration file (by default in-cluster configuration will be used)")
    cmd.PersistentFlags().DurationVar(&cfg.Kubernetes.ResyncInterval.Duration, "resync-interval", time.Minute, "the controller resync (with Kubernetes) interval, the minimum resync interval is 30s")
    cmd.PersistentFlags().StringSliceVar(&cfg.Kubernetes.AppNamespaces, "app-namespace", []string{config.NamespaceAll}, "namespaces that controller will watch for resources")
    cmd.PersistentFlags().StringVar(&cfg.Kubernetes.IngressClass, "ingress-class", config.IngressClass, "the class of an Ingress object is set using the field IngressClassName in Kubernetes clusters version v1.18.0 or higher or the annotation \"kubernetes.io/ingress.class\" (deprecated)")
    cmd.PersistentFlags().StringVar(&cfg.Kubernetes.ElectionID, "election-id", config.IngressAPISIXLeader, "election id used for campaign the controller leader")
    cmd.PersistentFlags().StringVar(&cfg.Kubernetes.IngressVersion, "ingress-version", config.IngressNetworkingV1, "the supported ingress api group version, can be \"networking/v1beta1\", \"networking/v1\" (for Kubernetes version v1.19.0 or higher) and \"extensions/v1beta1\"")
    cmd.PersistentFlags().StringVar(&cfg.Kubernetes.ApisixRouteVersion, "apisix-route-version", config.ApisixRouteV2alpha1, "the supported apisixroute api group version, can be \"apisix.apache.org/v1\" or \"apisix.apache.org/v2alpha1\"")
    cmd.PersistentFlags().BoolVar(&cfg.Kubernetes.WatchEndpointSlices, "watch-endpointslices", false, "whether to watch endpointslices rather than endpoints")
    cmd.PersistentFlags().StringVar(&cfg.APISIX.BaseURL, "apisix-base-url", "", "the base URL for APISIX admin api / manager api (deprecated, using --default-apisix-cluster-base-url instead)")
    cmd.PersistentFlags().StringVar(&cfg.APISIX.AdminKey, "apisix-admin-key", "", "admin key used for the authorization of APISIX admin api / manager api (deprecated, using --default-apisix-cluster-admin-key instead)")
    cmd.PersistentFlags().StringVar(&cfg.APISIX.DefaultClusterBaseURL, "default-apisix-cluster-base-url", "", "the base URL of admin api / manager api for the default APISIX cluster")
    cmd.PersistentFlags().StringVar(&cfg.APISIX.DefaultClusterAdminKey, "default-apisix-cluster-admin-key", "", "admin key used for the authorization of admin api / manager api for the default APISIX cluster")
    cmd.PersistentFlags().StringVar(&cfg.APISIX.DefaultClusterName, "default-apisix-cluster-name", "default", "name of the default apisix cluster")

    return cmd
}
通过这行代码(ingress, err := controller.NewController(cfg))可以看到,启动了pkg/ingress/controller.go

Controller结构定义(位于pkg/ingress/controller.go)

// Controller is the ingress apisix controller object.
// It holds the configuration, the clients towards Kubernetes and APISIX,
// the shared informers/listers for every watched resource kind, and one
// sub-controller per resource kind that drains the corresponding informer
// events.
type Controller struct {
    name              string // pod name (from POD_NAME), also the leader-election identity
    namespace         string // pod namespace (from POD_NAMESPACE, defaults to "default")
    cfg               *config.Config
    wg                sync.WaitGroup // tracks goroutines started via goAttach
    watchingNamespace map[string]struct{}
    apisix            apisix.APISIX
    podCache          types.PodCache
    translator        translation.Translator
    apiServer         *api.Server
    metricsCollector  metrics.Collector
    kubeClient        *kube.KubeClient
    // recorder emits Kubernetes events for the managed resources.
    recorder record.EventRecorder
    // this map enrolls which ApisixTls objects refer to a Kubernetes
    // Secret object.
    secretSSLMap *sync.Map

    // leaderContextCancelFunc will be called when apisix-ingress-controller
    // decides to give up its leader role.
    leaderContextCancelFunc context.CancelFunc

    // common informers and listers
    podInformer                 cache.SharedIndexInformer
    podLister                   listerscorev1.PodLister
    epInformer                  cache.SharedIndexInformer
    epLister                    kube.EndpointLister
    svcInformer                 cache.SharedIndexInformer
    svcLister                   listerscorev1.ServiceLister
    ingressLister               kube.IngressLister
    ingressInformer             cache.SharedIndexInformer
    secretInformer              cache.SharedIndexInformer
    secretLister                listerscorev1.SecretLister
    apisixUpstreamInformer      cache.SharedIndexInformer
    apisixUpstreamLister        listersv1.ApisixUpstreamLister
    apisixRouteLister           kube.ApisixRouteLister
    apisixRouteInformer         cache.SharedIndexInformer
    apisixTlsLister             listersv1.ApisixTlsLister
    apisixTlsInformer           cache.SharedIndexInformer
    apisixClusterConfigLister   listersv2alpha1.ApisixClusterConfigLister
    apisixClusterConfigInformer cache.SharedIndexInformer
    apisixConsumerInformer      cache.SharedIndexInformer
    apisixConsumerLister        listersv2alpha1.ApisixConsumerLister

    // resource controllers, one per watched resource kind; created in
    // initWhenStartLeading and run while this instance is the leader.
    podController           *podController
    endpointsController     *endpointsController
    endpointSliceController *endpointSliceController
    ingressController       *ingressController
    secretController        *secretController

    apisixUpstreamController      *apisixUpstreamController
    apisixRouteController         *apisixRouteController
    apisixTlsController           *apisixTlsController
    apisixClusterConfigController *apisixClusterConfigController
    apisixConsumerController      *apisixConsumerController
}

做一些初始化操作

// NewController creates an ingress apisix controller object.
// It reads the pod identity from the environment, builds the APISIX and
// Kubernetes clients plus the API server, and prepares the event recorder.
func NewController(cfg *config.Config) (*Controller, error) {
    podName := os.Getenv("POD_NAME")
    podNamespace := os.Getenv("POD_NAMESPACE")
    if podNamespace == "" {
        podNamespace = "default"
    }

    apisixClient, err := apisix.NewClient()
    if err != nil {
        return nil, err
    }
    kubeClient, err := kube.NewKubeClient(cfg)
    if err != nil {
        return nil, err
    }
    apiSrv, err := api.NewServer(cfg)
    if err != nil {
        return nil, err
    }

    // Only build the namespace set when something narrower than the
    // all-namespaces wildcard was configured; a nil map means "watch all".
    var watchingNamespace map[string]struct{}
    if len(cfg.Kubernetes.AppNamespaces) > 1 || cfg.Kubernetes.AppNamespaces[0] != v1.NamespaceAll {
        watchingNamespace = make(map[string]struct{}, len(cfg.Kubernetes.AppNamespaces))
        for _, ns := range cfg.Kubernetes.AppNamespaces {
            watchingNamespace[ns] = struct{}{}
        }
    }

    // Event recorder plumbing: register the APISIX CRD scheme and broadcast
    // events through the core/v1 events API.
    utilruntime.Must(apisixscheme.AddToScheme(scheme.Scheme))
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.Client.CoreV1().Events("")})

    return &Controller{
        name:              podName,
        namespace:         podNamespace,
        cfg:               cfg,
        apiServer:         apiSrv,
        apisix:            apisixClient,
        metricsCollector:  metrics.NewPrometheusCollector(podName, podNamespace),
        kubeClient:        kubeClient,
        watchingNamespace: watchingNamespace,
        secretSSLMap:      new(sync.Map),
        recorder:          eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: _component}),
        podCache:          types.NewPodCache(),
    }, nil
}

 

// Run launches the controller.
//
// It cancels the root context when stop is closed, starts the API server in
// the background, then repeatedly takes part in a Kubernetes Lease-based
// leader election. c.run is invoked each time this instance becomes leader;
// after leadership is lost the loop re-enters the election until stop fires.
func (c *Controller) Run(stop chan struct{}) error {
    rootCtx, rootCancel := context.WithCancel(context.Background())
    defer rootCancel()
    // Bridge the stop channel into context cancellation.
    go func() {
        <-stop
        rootCancel()
    }()
    c.metricsCollector.ResetLeader(false)

    go func() {
        if err := c.apiServer.Run(rootCtx.Done()); err != nil {
            log.Errorf("failed to launch API Server: %s", err)
        }
    }()

    // Lease lock in the controller's own namespace, identified by the
    // configured election ID; this pod's name is the candidate identity.
    lock := &resourcelock.LeaseLock{
        LeaseMeta: metav1.ObjectMeta{
            Namespace: c.namespace,
            Name:      c.cfg.Kubernetes.ElectionID,
        },
        Client: c.kubeClient.Client.CoordinationV1(),
        LockConfig: resourcelock.ResourceLockConfig{
            Identity:      c.name,
            EventRecorder: c,
        },
    }
    cfg := leaderelection.LeaderElectionConfig{
        Lock:          lock,
        LeaseDuration: 15 * time.Second,
        RenewDeadline: 5 * time.Second,
        RetryPeriod:   2 * time.Second,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: c.run,
            OnNewLeader: func(identity string) {
                log.Warnf("found a new leader %s", identity)
                if identity != c.name {
                    log.Infow("controller now is running as a candidate",
                        zap.String("namespace", c.namespace),
                        zap.String("pod", c.name),
                    )
                }
            },
            OnStoppedLeading: func() {
                log.Infow("controller now is running as a candidate",
                    zap.String("namespace", c.namespace),
                    zap.String("pod", c.name),
                )
                c.metricsCollector.ResetLeader(false)
            },
        },
        // Set it to false as current leaderelection implementation will report
        // "Failed to release lock: resource name may not be empty" error when
        // ReleaseOnCancel is true and the Run context is cancelled.
        ReleaseOnCancel: false,
        Name:            "ingress-apisix",
    }

    elector, err := leaderelection.NewLeaderElector(cfg)
    if err != nil {
        log.Errorf("failed to create leader elector: %s", err.Error())
        return err
    }

// elector.Run returns when leadership is lost (or the context ends); unless
// the root context is done, jump back and campaign again.
election:
    curCtx, cancel := context.WithCancel(rootCtx)
    c.leaderContextCancelFunc = cancel
    elector.Run(curCtx)
    select {
    case <-rootCtx.Done():
        return nil
    default:
        goto election
    }
}

 

// run is the leader workload: it registers the default APISIX cluster,
// waits for its cache to sync, then launches every shared informer and
// resource controller on its own goroutine and blocks until the leader
// context ends.
func (c *Controller) run(ctx context.Context) {
    log.Infow("controller tries to leading ...",
        zap.String("namespace", c.namespace),
        zap.String("pod", c.name),
    )

    var cancelFunc context.CancelFunc
    ctx, cancelFunc = context.WithCancel(ctx)
    defer cancelFunc()

    // give up leader
    defer c.leaderContextCancelFunc()

    clusterOpts := &apisix.ClusterOptions{
        Name:     c.cfg.APISIX.DefaultClusterName,
        AdminKey: c.cfg.APISIX.DefaultClusterAdminKey,
        BaseURL:  c.cfg.APISIX.DefaultClusterBaseURL,
    }
    if err := c.apisix.AddCluster(ctx, clusterOpts); err != nil && err != apisix.ErrDuplicatedCluster {
        // TODO give up the leader role
        log.Errorf("failed to add default cluster: %s", err)
        return
    }

    if err := c.apisix.Cluster(c.cfg.APISIX.DefaultClusterName).HasSynced(ctx); err != nil {
        // TODO give up the leader role
        log.Errorf("failed to wait the default cluster to be ready: %s", err)

        // re-create apisix cluster, used in next c.run
        if err = c.apisix.UpdateCluster(ctx, clusterOpts); err != nil {
            log.Errorf("failed to update default cluster: %s", err)
        }
        return
    }

    c.initWhenStartLeading()

    c.goAttach(func() {
        c.checkClusterHealth(ctx, cancelFunc)
    })

    // Start every shared informer; each gets its own goroutine tracked by
    // the controller's wait group.
    informers := []cache.SharedIndexInformer{
        c.podInformer,
        c.epInformer,
        c.svcInformer,
        c.ingressInformer,
        c.apisixRouteInformer,
        c.apisixUpstreamInformer,
        c.apisixClusterConfigInformer,
        c.secretInformer,
        c.apisixTlsInformer,
        c.apisixConsumerInformer,
    }
    for _, informer := range informers {
        informer := informer // per-iteration copy for the closure
        c.goAttach(func() {
            informer.Run(ctx.Done())
        })
    }

    // Start the resource controllers.
    c.goAttach(func() {
        c.podController.run(ctx)
    })
    c.goAttach(func() {
        // Exactly one of the endpoint-flavored controllers was created,
        // depending on the WatchEndpointSlices setting.
        if c.cfg.Kubernetes.WatchEndpointSlices {
            c.endpointSliceController.run(ctx)
        } else {
            c.endpointsController.run(ctx)
        }
    })
    c.goAttach(func() {
        c.apisixUpstreamController.run(ctx)
    })
    c.goAttach(func() {
        c.ingressController.run(ctx)
    })
    c.goAttach(func() {
        c.apisixRouteController.run(ctx)
    })
    c.goAttach(func() {
        c.apisixClusterConfigController.run(ctx)
    })
    c.goAttach(func() {
        c.apisixTlsController.run(ctx)
    })
    c.goAttach(func() {
        c.secretController.run(ctx)
    })
    c.goAttach(func() {
        c.apisixConsumerController.run(ctx)
    })

    c.metricsCollector.ResetLeader(true)

    log.Infow("controller now is running as leader",
        zap.String("namespace", c.namespace),
        zap.String("pod", c.name),
    )

    <-ctx.Done()
    c.wg.Wait()
}

 

// initWhenStartLeading builds all listers, informers, the translator and
// the per-resource controllers. It is invoked once each time this instance
// wins the leader election, before the informers are started.
func (c *Controller) initWhenStartLeading() {
    kubeFactory := c.kubeClient.NewSharedIndexInformerFactory()
    apisixFactory := c.kubeClient.NewAPISIXSharedIndexInformerFactory()

    // Listers for core and APISIX custom resources.
    c.podLister = kubeFactory.Core().V1().Pods().Lister()
    c.epLister, c.epInformer = kube.NewEndpointListerAndInformer(kubeFactory, c.cfg.Kubernetes.WatchEndpointSlices)
    c.svcLister = kubeFactory.Core().V1().Services().Lister()
    c.ingressLister = kube.NewIngressLister(
        kubeFactory.Networking().V1().Ingresses().Lister(),
        kubeFactory.Networking().V1beta1().Ingresses().Lister(),
        kubeFactory.Extensions().V1beta1().Ingresses().Lister(),
    )
    c.secretLister = kubeFactory.Core().V1().Secrets().Lister()
    c.apisixRouteLister = kube.NewApisixRouteLister(
        apisixFactory.Apisix().V1().ApisixRoutes().Lister(),
        apisixFactory.Apisix().V2alpha1().ApisixRoutes().Lister(),
        apisixFactory.Apisix().V2beta1().ApisixRoutes().Lister(),
    )
    c.apisixUpstreamLister = apisixFactory.Apisix().V1().ApisixUpstreams().Lister()
    c.apisixTlsLister = apisixFactory.Apisix().V1().ApisixTlses().Lister()
    c.apisixClusterConfigLister = apisixFactory.Apisix().V2alpha1().ApisixClusterConfigs().Lister()
    c.apisixConsumerLister = apisixFactory.Apisix().V2alpha1().ApisixConsumers().Lister()

    c.translator = translation.NewTranslator(&translation.TranslatorOptions{
        PodCache:             c.podCache,
        PodLister:            c.podLister,
        EndpointLister:       c.epLister,
        ServiceLister:        c.svcLister,
        ApisixUpstreamLister: c.apisixUpstreamLister,
        SecretLister:         c.secretLister,
        UseEndpointSlices:    c.cfg.Kubernetes.WatchEndpointSlices,
    })

    // Pick the ingress informer that matches the configured API group
    // version; anything unrecognized falls back to extensions/v1beta1.
    switch c.cfg.Kubernetes.IngressVersion {
    case config.IngressNetworkingV1:
        c.ingressInformer = kubeFactory.Networking().V1().Ingresses().Informer()
    case config.IngressNetworkingV1beta1:
        c.ingressInformer = kubeFactory.Networking().V1beta1().Ingresses().Informer()
    default:
        c.ingressInformer = kubeFactory.Extensions().V1beta1().Ingresses().Informer()
    }
    // Pick the ApisixRoute informer for the configured CRD version; an
    // unrecognized version leaves the informer nil (as before).
    switch c.cfg.Kubernetes.ApisixRouteVersion {
    case config.ApisixRouteV1:
        c.apisixRouteInformer = apisixFactory.Apisix().V1().ApisixRoutes().Informer()
    case config.ApisixRouteV2alpha1:
        c.apisixRouteInformer = apisixFactory.Apisix().V2alpha1().ApisixRoutes().Informer()
    case config.ApisixRouteV2beta1:
        c.apisixRouteInformer = apisixFactory.Apisix().V2beta1().ApisixRoutes().Informer()
    }

    // Remaining informers.
    c.podInformer = kubeFactory.Core().V1().Pods().Informer()
    c.svcInformer = kubeFactory.Core().V1().Services().Informer()
    c.apisixUpstreamInformer = apisixFactory.Apisix().V1().ApisixUpstreams().Informer()
    c.apisixClusterConfigInformer = apisixFactory.Apisix().V2alpha1().ApisixClusterConfigs().Informer()
    c.secretInformer = kubeFactory.Core().V1().Secrets().Informer()
    c.apisixTlsInformer = apisixFactory.Apisix().V1().ApisixTlses().Informer()
    c.apisixConsumerInformer = apisixFactory.Apisix().V2alpha1().ApisixConsumers().Informer()

    // Resource controllers; either the endpoints or the endpointslice
    // controller is built, never both.
    if c.cfg.Kubernetes.WatchEndpointSlices {
        c.endpointSliceController = c.newEndpointSliceController()
    } else {
        c.endpointsController = c.newEndpointsController()
    }
    c.podController = c.newPodController()
    c.apisixUpstreamController = c.newApisixUpstreamController()
    c.ingressController = c.newIngressController()
    c.apisixRouteController = c.newApisixRouteController()
    c.apisixClusterConfigController = c.newApisixClusterConfigController()
    c.apisixTlsController = c.newApisixTlsController()
    c.secretController = c.newSecretController()
    c.apisixConsumerController = c.newApisixConsumerController()
}

一圈下来,最后来到了具体资源控制器定义的地方pkg/ingress/apisix_consumer.go

// newApisixConsumerController builds the ApisixConsumer sub-controller:
// a single-worker, rate-limited workqueue fed by the shared informer's
// add/update/delete event handlers.
func (c *Controller) newApisixConsumerController() *apisixConsumerController {
    ctl := &apisixConsumerController{
        controller: c,
        workqueue:  workqueue.NewNamedRateLimitingQueue(workqueue.NewItemFastSlowRateLimiter(1*time.Second, 60*time.Second, 5), "ApisixConsumer"),
        workers:    1,
    }

    handlers := cache.ResourceEventHandlerFuncs{
        AddFunc:    ctl.onAdd,
        UpdateFunc: ctl.onUpdate,
        DeleteFunc: ctl.onDelete,
    }
    c.apisixConsumerInformer.AddEventHandler(handlers)
    return ctl
}

进而处理各类事件

// run starts the ApisixConsumer controller: it waits for the informer cache
// to sync, spawns the configured number of workers, and shuts the workqueue
// down once the context is cancelled.
func (c *apisixConsumerController) run(ctx context.Context) {
    log.Info("ApisixConsumer controller started")
    defer log.Info("ApisixConsumer controller exited")

    if !cache.WaitForCacheSync(ctx.Done(), c.controller.apisixConsumerInformer.HasSynced) {
        log.Error("cache sync failed")
        return
    }
    for w := 0; w < c.workers; w++ {
        go c.runWorker(ctx)
    }

    <-ctx.Done()
    c.workqueue.ShutDown()
}

// runWorker drains the workqueue until it is shut down, handing each event
// to sync and reporting the outcome through handleSyncErr (which decides
// about retries).
func (c *apisixConsumerController) runWorker(ctx context.Context) {
    for {
        item, shutdown := c.workqueue.Get()
        if shutdown {
            return
        }
        syncErr := c.sync(ctx, item.(*types.Event))
        c.workqueue.Done(item)
        c.handleSyncErr(item, syncErr)
    }
}

// sync reconciles a single ApisixConsumer event: it resolves the object by
// its namespace/name key (falling back to the event tombstone for deletes),
// translates it into an APISIX consumer and pushes the result to APISIX.
// A non-nil return causes handleSyncErr to requeue the event.
func (c *apisixConsumerController) sync(ctx context.Context, ev *types.Event) error {
    key := ev.Object.(string)
    namespace, name, err := cache.SplitMetaNamespaceKey(key)
    if err != nil {
        log.Errorf("found ApisixConsumer resource with invalid meta namespace key %s: %s", key, err)
        return err
    }

    ac, err := c.controller.apisixConsumerLister.ApisixConsumers(namespace).Get(name)
    if err != nil {
        if !k8serrors.IsNotFound(err) {
            log.Errorf("failed to get ApisixConsumer %s: %s", key, err)
            return err
        }
        // Not found: for a non-delete event the object vanished before we
        // could process it, so there is nothing to do and no retry needed.
        if ev.Type != types.EventDelete {
            log.Warnf("ApisixConsumer %s was deleted before it can be delivered", key)
            // Don't need to retry.
            return nil
        }
    }
    if ev.Type == types.EventDelete {
        if ac != nil {
            // We still find the resource while we are processing the DELETE event,
            // that means object with same namespace and name was created, discarding
            // this stale DELETE event.
            log.Warnf("discard the stale ApisixConsumer delete event since the %s exists", key)
            return nil
        }
        // Use the copy captured at deletion time so the delete can still be
        // translated and propagated to APISIX.
        ac = ev.Tombstone.(*configv2alpha1.ApisixConsumer)
    }

    consumer, err := c.controller.translator.TranslateApisixConsumer(ac)
    if err != nil {
        log.Errorw("failed to translate ApisixConsumer",
            zap.Error(err),
            zap.Any("ApisixConsumer", ac),
        )
        // Surface the failure both as a Kubernetes event and in the
        // resource's status conditions.
        c.controller.recorderEvent(ac, corev1.EventTypeWarning, _resourceSyncAborted, err)
        c.controller.recordStatus(ac, _resourceSyncAborted, err, metav1.ConditionFalse)
        return err
    }
    // NOTE(review): log.Debug is given zap fields here; upstream uses
    // log.Debugw for structured fields — confirm the log package's Debug
    // accepts them as intended.
    log.Debug("got consumer object from ApisixConsumer",
        zap.Any("consumer", consumer),
        zap.Any("ApisixConsumer", ac),
    )

    if err := c.controller.syncConsumer(ctx, consumer, ev.Type); err != nil {
        log.Errorw("failed to sync Consumer to APISIX",
            zap.Error(err),
            zap.Any("consumer", consumer),
        )
        c.controller.recorderEvent(ac, corev1.EventTypeWarning, _resourceSyncAborted, err)
        c.controller.recordStatus(ac, _resourceSyncAborted, err, metav1.ConditionFalse)
        return err
    }

    c.controller.recorderEvent(ac, corev1.EventTypeNormal, _resourceSynced, nil)
    return nil
}

 

posted on 2021-09-30 15:32  it_worker365  阅读(560)  评论(0编辑  收藏  举报