# Create the resource
[root@k-m-1 crds]# kubectl apply -f crd-demo.yaml
crontab.stable.example.com/crontab-crd created
# View the resource
[root@k-m-1 crds]# kubectl get crontabs.stable.example.com
NAME          AGE
crontab-crd   21s
# View the resource via its short name
[root@k-m-1 crds]# kubectl get ct
NAME          AGE
crontab-crd   5s
# That is the CRD concept in a nutshell: it only defines the data. How that data is actually handled is up to the Controller, which we implement below.
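For reference, the crd-demo.yaml applied above is an instance of the CronTab CRD defined earlier. A minimal version might look like the canonical Kubernetes CronTab example below (the spec fields are my assumption, not the exact contents of the file from the previous section):

apiVersion: "stable.example.com/v1"
kind: CronTab
metadata:
  name: crontab-crd
spec:
  cronSpec: "* * * * */5"
  image: my-awesome-cron-image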
# Development Environment
To develop an Operator you naturally need a Kubernetes cluster, plus a Go environment; there is nothing special about installing either. We then need to install operator-sdk. There are many ways to get it: you can download the release you want straight from GitHub and drop it onto your PATH, or build it from source yourself, which is also perfectly fine. Here I'll simply use the prebuilt binary.
# Since my cluster is version 1.25.0, I won't pick too recent an operator-sdk release either
[root@localhost ~]# wget https://github.com/operator-framework/operator-sdk/releases/download/v1.28.0/operator-sdk_linux_amd64
[root@localhost ~]# mv operator-sdk_linux_amd64 /usr/local/bin/operator-sdk
[root@localhost ~]# chmod +x /usr/local/bin/operator-sdk
[root@localhost ~]# operator-sdk version
operator-sdk version: "v1.28.0", commit: "484013d1865c35df2bc5dfea0ab6ea6b434adefa", kubernetes version: "1.26.0", go version: "go1.19.6", GOOS: "linux", GOARCH: "amd64"
# Create the Project
[root@localhost ~]# mkdir app-operator
[root@localhost ~]# cd app-operator/
# Here --domain is the domain part of the API group in apiVersion (with the apps group we create later, custom resources get apiVersion: apps.kudevops.io/v1alpha1), and --repo is the Go module path. On the first run the dependencies have to be downloaded, so it takes a while; subsequent runs are much faster.
[root@localhost app-operator]# operator-sdk init --domain kudevops.io --repo github.com/gitlayzer/app-operator
Writing kustomize manifests for you to edit...
Writing scaffold for you to edit...
Get controller runtime:
$ go get sigs.k8s.io/controller-runtime@v0.14.1
Update dependencies:
$ go mod tidy
Next: define a resource with:
$ operator-sdk create api
# After initialization it prompts us to create an API. Before that, let's look at the current directory structure.
[root@localhost app-operator]# tree -L 2
.
├── config                # All the manifests the project needs
│   ├── default
│   ├── manager
│   ├── manifests
│   ├── prometheus
│   ├── rbac
│   └── scorecard
├── Dockerfile            # For building the controller image
├── go.mod
├── go.sum
├── hack
│   └── boilerplate.go.txt
├── main.go               # Entry point of the whole project
├── Makefile              # Operational targets
├── PROJECT
└── README.md

8 directories, 8 files
# Next, as prompted above, we create an API
[root@localhost app-operator]# operator-sdk create api --group apps --version v1alpha1 --kind AppService --resource --controller
Writing kustomize manifests for you to edit...
Writing scaffold for you to edit...
api/v1alpha1/appservice_types.go
controllers/appservice_controller.go
Update dependencies:
$ go mod tidy
Running make:
$ make generate
mkdir -p /root/app-operator/bin
test -s /root/app-operator/bin/controller-gen && /root/app-operator/bin/controller-gen --version | grep -q v0.11.1 || \
GOBIN=/root/app-operator/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.11.1
go: downloading sigs.k8s.io/controller-tools v0.11.1
go: downloading github.com/spf13/cobra v1.6.1
go: downloading github.com/gobuffalo/flect v0.3.0
go: downloading golang.org/x/tools v0.4.0
go: downloading github.com/fatih/color v1.13.0
go: downloading k8s.io/utils v0.0.0-20221107191617-1a15be271d1d
go: downloading github.com/mattn/go-colorable v0.1.9
go: downloading github.com/mattn/go-isatty v0.0.14
go: downloading golang.org/x/net v0.4.0
go: downloading golang.org/x/mod v0.7.0
/root/app-operator/bin/controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
Next: implement your new API and generate the manifests (e.g. CRDs,CRs) with:
$ make manifests
# As you can see, the scaffolding has been generated for us. Let's look at the new directory structure.
[root@localhost app-operator]# tree -L 2
.
├── api
│   └── v1alpha1
├── bin
│   └── controller-gen
├── config
│   ├── crd
│   ├── default
│   ├── manager
│   ├── manifests
│   ├── prometheus
│   ├── rbac
│   ├── samples
│   └── scorecard
├── controllers
│   ├── appservice_controller.go
│   └── suite_test.go
├── Dockerfile
├── go.mod
├── go.sum
├── hack
│   └── boilerplate.go.txt
├── main.go
├── Makefile
├── PROJECT
└── README.md

14 directories, 11 files
# Next we define our own structs. The code is as follows:
// api/v1alpha1/appservice_types.go

/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// AppServiceSpec defines the desired state of AppService
type AppServiceSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	Replicas *int32               `json:"replicas"`
	Image    string               `json:"image"`
	Ports    []corev1.ServicePort `json:"ports,omitempty"`
}

// AppServiceStatus defines the observed state of AppService
type AppServiceStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	appsv1.DeploymentStatus `json:",inline"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// AppService is the Schema for the appservices API
type AppService struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   AppServiceSpec   `json:"spec,omitempty"`
	Status AppServiceStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// AppServiceList contains a list of AppService
type AppServiceList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []AppService `json:"items"`
}

func init() {
	SchemeBuilder.Register(&AppService{}, &AppServiceList{})
}
# With the types written, we simply run make; it regenerates the related code for us, then formats, vets, and builds the project.
[root@localhost app-operator]# make
test -s /root/app-operator/bin/controller-gen && /root/app-operator/bin/controller-gen --version | grep -q v0.11.1 || \
GOBIN=/root/app-operator/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.11.1
/root/app-operator/bin/controller-gen rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
/root/app-operator/bin/controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
api/v1alpha1/appservice_types.go
go vet ./...
go build -o bin/manager main.go
# We can also run make generate on its own, which regenerates the deepcopy code; the CRD YAML itself is written by the manifests target, which already ran as part of make above.
[root@localhost app-operator]# make generate
test -s /root/app-operator/bin/controller-gen && /root/app-operator/bin/controller-gen --version | grep -q v0.11.1 || \
GOBIN=/root/app-operator/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.11.1
/root/app-operator/bin/controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
# Let's take a look at the generated CRD file
[root@localhost app-operator]# ls config/crd/bases/apps.kudevops.io_appservices.yaml
config/crd/bases/apps.kudevops.io_appservices.yaml
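Peeking inside, the header of the generated file looks roughly like this (abridged; the exact output depends on the controller-gen version):

apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: appservices.apps.kudevops.io
spec:
  group: apps.kudevops.io
  names:
    kind: AppService
    listKind: AppServiceList
    plural: appservices
    singular: appservice
  scope: Namespaced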
# Now we can deploy the CRD to the Kubernetes cluster
[root@localhost app-operator]# kubectl apply -k config/crd
customresourcedefinition.apiextensions.k8s.io/appservices.apps.kudevops.io created
# View the deployed CRD
[root@localhost app-operator]# kubectl get crds | grep kudevops.io
appservices.apps.kudevops.io   2023-06-14T13:54:17Z
# Check the API resources the cluster now serves
[root@localhost app-operator]# kubectl api-resources | grep kudevops.io
appservices   apps.kudevops.io/v1alpha1   true   AppService
# Next we need to fill in the manifest for our custom resource.
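The SDK scaffolds an empty sample under config/samples/ for us to edit. Filled in against the spec fields we defined, it might look like the following (the image, replica count, and ports here are my assumptions, chosen to match the outputs shown later):

[root@localhost app-operator]# cat config/samples/apps_v1alpha1_appservice.yaml
apiVersion: apps.kudevops.io/v1alpha1
kind: AppService
metadata:
  name: appservice-sample
spec:
  replicas: 3
  image: nginx:1.25.0
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30080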
# Deploy the resource
[root@localhost app-operator]# kubectl apply -f config/samples/apps_v1alpha1_appservice.yaml
appservice.apps.kudevops.io/appservice-sample created
# View the resource
[root@localhost app-operator]# kubectl get appservices
NAME                AGE
appservice-sample   8s
# But just like the bare CRD earlier, this on its own means nothing, because we have not yet written a controller that acts on the custom resource. So the next step is to write a controller for it.
// controllers/appservice_controller.go

/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	appsv1alpha1 "github.com/gitlayzer/app-operator/api/v1alpha1"
)

// AppServiceReconciler reconciles a AppService object
type AppServiceReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

//+kubebuilder:rbac:groups=apps.kudevops.io,resources=appservices,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps.kudevops.io,resources=appservices/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=apps.kudevops.io,resources=appservices/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the AppService object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile
func (r *AppServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	reqLogger := log.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name)

	reqLogger.Info("Reconciling AppService......")

	// Fetch the AppService instance; if it is already gone there is nothing to do.
	instance := &appsv1alpha1.AppService{}
	if err := r.Client.Get(ctx, req.NamespacedName, instance); err != nil && errors.IsNotFound(err) {
		return ctrl.Result{}, nil
	}

	// If the matching Deployment does not exist yet, create it along with the Service.
	deploy := &appsv1.Deployment{}
	if err := r.Client.Get(ctx, req.NamespacedName, deploy); err != nil && errors.IsNotFound(err) {
		deployment := NewDeploy(instance)
		if err := r.Client.Create(ctx, deployment); err != nil {
			return ctrl.Result{}, err
		}

		service := NewService(instance)
		if err := r.Client.Create(ctx, service); err != nil {
			return ctrl.Result{}, err
		}
	}

	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *AppServiceReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appsv1alpha1.AppService{}).
		Complete(r)
}

// NewDeploy builds the Deployment for an AppService. The ownerReference makes
// the Deployment a child of the AppService, so deleting the AppService
// garbage-collects the Deployment as well.
func NewDeploy(app *appsv1alpha1.AppService) *appsv1.Deployment {
	return &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      app.Name,
			Namespace: app.Namespace,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(app, schema.GroupVersionKind{
					Group:   appsv1alpha1.GroupVersion.Group,
					Version: appsv1alpha1.GroupVersion.Version,
					Kind:    "AppService",
				}),
			},
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: app.Spec.Replicas,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": app.Name,
					},
				},
				Spec: corev1.PodSpec{
					Containers: NewContainers(app),
				},
			},
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": app.Name,
				},
			},
		},
	}
}

// NewContainers translates the service ports in the spec into container ports.
func NewContainers(app *appsv1alpha1.AppService) []corev1.Container {
	var containerPorts []corev1.ContainerPort
	for _, port := range app.Spec.Ports {
		containerPort := corev1.ContainerPort{}
		containerPort.ContainerPort = port.TargetPort.IntVal
		containerPorts = append(containerPorts, containerPort)
	}
	return []corev1.Container{{
		Name:            app.Name,
		Image:           app.Spec.Image,
		Ports:           containerPorts,
		ImagePullPolicy: corev1.PullIfNotPresent,
	}}
}

// NewService builds a NodePort Service selecting the Pods created above,
// again owned by the AppService via the ownerReference.
func NewService(app *appsv1alpha1.AppService) *corev1.Service {
	return &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      app.Name,
			Namespace: app.Namespace,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(app, schema.GroupVersionKind{
					Group:   appsv1alpha1.GroupVersion.Group,
					Version: appsv1alpha1.GroupVersion.Version,
					Kind:    "AppService",
				}),
			},
		},
		Spec: corev1.ServiceSpec{
			Type:  corev1.ServiceTypeNodePort,
			Ports: app.Spec.Ports,
			Selector: map[string]string{
				"app": app.Name,
			},
		},
	}
}
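A note on SetupWithManager: as scaffolded, the controller only watches AppService objects, so manual edits to the Deployment it created will not re-trigger reconciliation. A small variant (my own sketch, not generated by the SDK) would also watch the owned Deployments:

// Sketch: also enqueue an AppService whenever a Deployment it owns changes.
func (r *AppServiceReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appsv1alpha1.AppService{}).
		Owns(&appsv1.Deployment{}).
		Complete(r)
}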
# As shown above, this is the simplest possible controller: it only creates resources and performs no other operations. Next comes debugging; we can deploy the controller directly with the make targets.
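Before moving on, note what this minimal Reconcile does not do: if you edit the AppService spec after its Deployment exists, nothing is synced. As a sketch of where to take it next (my own extension, assuming the same package, imports, and helpers shown above; not part of this walkthrough), an update-aware version could look like this:

// Sketch: an update-aware Reconcile, reusing NewDeploy and NewService.
func (r *AppServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	instance := &appsv1alpha1.AppService{}
	if err := r.Client.Get(ctx, req.NamespacedName, instance); err != nil {
		// The CR is gone; its children are garbage-collected via ownerReferences.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	deploy := &appsv1.Deployment{}
	if err := r.Client.Get(ctx, req.NamespacedName, deploy); err != nil {
		if !errors.IsNotFound(err) {
			return ctrl.Result{}, err
		}
		// Deployment is missing: create it and the Service, as before.
		if err := r.Client.Create(ctx, NewDeploy(instance)); err != nil {
			return ctrl.Result{}, err
		}
		if err := r.Client.Create(ctx, NewService(instance)); err != nil {
			return ctrl.Result{}, err
		}
		return ctrl.Result{}, nil
	}

	// Deployment exists: overwrite its spec with the desired state so changes
	// to replicas/image/ports take effect. (The Service is left as-is here to
	// keep the sketch short.)
	deploy.Spec = NewDeploy(instance).Spec
	if err := r.Client.Update(ctx, deploy); err != nil {
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}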
[root@localhost app-operator]# make
/root/app-operator/bin/controller-gen rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
/root/app-operator/bin/controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
go vet ./...
go build -o bin/manager main.go
[root@localhost app-operator]# make generate
test -s /root/app-operator/bin/controller-gen && /root/app-operator/bin/controller-gen --version | grep -q v0.11.1 || \
GOBIN=/root/app-operator/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.11.1
/root/app-operator/bin/controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
[root@localhost app-operator]# make install run
test -s /root/app-operator/bin/controller-gen && /root/app-operator/bin/controller-gen --version | grep -q v0.11.1 || \
GOBIN=/root/app-operator/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.11.1
/root/app-operator/bin/controller-gen rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
test -s /root/app-operator/bin/kustomize || { curl -Ss "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash -s -- 3.8.7 /root/app-operator/bin; }
/root/app-operator/bin/kustomize build config/crd | kubectl apply -f -
customresourcedefinition.apiextensions.k8s.io/appservices.apps.kudevops.io created
/root/app-operator/bin/controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
go vet ./...
go run ./main.go
2023-06-14T23:52:34+08:00 INFO controller-runtime.metrics Metrics server is starting to listen {"addr": ":8080"}
2023-06-14T23:52:34+08:00 INFO setup starting manager
2023-06-14T23:52:34+08:00 INFO Starting server {"path": "/metrics", "kind": "metrics", "addr": "[::]:8080"}
2023-06-14T23:52:34+08:00 INFO Starting server {"kind": "health probe", "addr": "[::]:8081"}
2023-06-14T23:52:34+08:00 INFO Starting EventSource {"controller": "appservice", "controllerGroup": "apps.kudevops.io", "controllerKind": "AppService", "source": "kind source: *v1alpha1.AppService"}
2023-06-14T23:52:34+08:00 INFO Starting Controller {"controller": "appservice", "controllerGroup": "apps.kudevops.io", "controllerKind": "AppService"}
2023-06-14T23:52:34+08:00 INFO Starting workers {"controller": "appservice", "controllerGroup": "apps.kudevops.io", "controllerKind": "AppService", "worker count": 1}
# One annoying quirk of this process is that it downloads kustomize. But if you read the Makefile carefully, you can see it first checks whether the binary already exists under ./bin, so if you already have kustomize locally you can just link it in, or tweak the Makefile, as shown below.
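A minimal way to do that, assuming kustomize is already somewhere on your PATH (the target path matches this project's layout; adjust it to your checkout):

[root@localhost app-operator]# ln -s $(which kustomize) /root/app-operator/bin/kustomize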
# At this point you can see the controller has started and is printing its logs for us. Deleting a resource triggers reconciliation:
[root@localhost app-operator]# kubectl delete appservice appservice-sample
appservice.apps.kudevops.io "appservice-sample" deleted
# This is the log printed by the reconcile loop
2023-06-14T23:07:54+08:00 INFO Reconciling AppService...... {"Request.Namespace": "default", "Request.Name": "appservice-sample"}
2023-06-14T23:09:35+08:00 INFO Reconciling AppService...... {"Request.Namespace": "default", "Request.Name": "appservice-sample"}
# Then let's create a new resource
[root@localhost app-operator]# kubectl apply -k config/samples/
appservice.apps.kudevops.io/appservice-sample created
# And the reconcile log shows up again
2023-06-14T23:11:21+08:00 INFO Reconciling AppService...... {"Request.Namespace": "default", "Request.Name": "appservice-sample"}
# Next, let's verify that a Deployment and a Service were created according to our logic
[root@localhost samples]# kubectl get deployment,svc
NAME                                READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/appservice-sample   3/3     3            3           13s

NAME                        TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
service/appservice-sample   NodePort    10.96.2.30   <none>        80:30080/TCP   13s
service/kubernetes          ClusterIP   10.96.0.1    <none>        443/TCP        44d
# Test that the Pods can be reached (10.0.0.11 is the node's IP)
[root@localhost samples]# curl 10.0.0.11:30080 -I
HTTP/1.1 200 OK
Server: nginx/1.25.0
Date: Wed, 14 Jun 2023 15:54:05 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Tue, 23 May 2023 15:08:20 GMT
Connection: keep-alive
ETag: "646cd6e4-267"
Accept-Ranges: bytes
# Now let's test cascading deletion: when we delete our custom resource, will the Deployment and Service be deleted along with it?
[root@k-m-1 ~]# kubectl get pod,svc
NAME                                     READY   STATUS    RESTARTS   AGE
pod/appservice-sample-5784bc58bf-5wb6k   1/1     Running   0          8s
pod/appservice-sample-5784bc58bf-62hh5   1/1     Running   0          8s
pod/appservice-sample-5784bc58bf-xdbwp   1/1     Running   0          8s

NAME                        TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE
service/appservice-sample   NodePort    10.96.1.149   <none>        80:30080/TCP   8s
service/kubernetes          ClusterIP   10.96.0.1     <none>        443/TCP        45d
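The cascade works because NewDeploy and NewService set a controller ownerReference pointing at the AppService, and Kubernetes' garbage collector deletes dependents when their owner goes away. You can verify the reference like this (a sanity check I'm adding here; output abridged):

[root@k-m-1 ~]# kubectl get deployment appservice-sample -o jsonpath='{.metadata.ownerReferences[0].kind}'
AppService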
# Delete the custom resource
[root@localhost app-operator]# kubectl delete -k config/samples/
appservice.apps.kudevops.io "appservice-sample" deleted
# Check the resources again
[root@k-m-1 ~]# kubectl get pod,svc
NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   45d
# At this point our custom Operator is complete. One more thing worth knowing: operator-sdk also scaffolded some extras for us, such as the metrics endpoint, RBAC, and a ServiceMonitor. The remaining step is to package the controller.
# Build and push the image
make docker-build docker-push IMG=<registry>/app-operator:v0.0.1
# Deploy the controller to the cluster with the specified image
make deploy IMG=<registry>/app-operator:v0.0.1
# Uninstall the operator
make undeploy