k8s Ingress Introduction and Deploying the IngressController (repost)

1. Related Articles

k8s Ingress introduction and deploying the IngressController
Offline deployment of kubernetes ingress nginx controller: v0.48.1
K8S ingress in practice

2. Troubleshooting

Deploying ingress nginx on k8s v1.23.1 fails with "no matches for kind" errors:

unable to recognize "mandatory.yaml": no matches for kind "ClusterRole" in version "rbac.authorization.k8s.io/v1beta1"
unable to recognize "mandatory.yaml": no matches for kind "Role" in version "rbac.authorization.k8s.io/v1beta1"
unable to recognize "mandatory.yaml": no matches for kind "RoleBinding" in version "rbac.authorization.k8s.io/v1beta1"
unable to recognize "mandatory.yaml": no matches for kind "ClusterRoleBinding" in version "rbac.authorization.k8s.io/v1beta1"

Solution:
Change the v1beta1 apiVersion of each failing object to v1; the v1beta1 RBAC APIs were removed in Kubernetes v1.22, which is why v1.23.1 no longer recognizes them.
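One way to make the substitution is an in-place sed pass over the manifest (a minimal sketch, assuming the file is named mandatory.yaml as in the error messages):

sed -i 's#rbac.authorization.k8s.io/v1beta1#rbac.authorization.k8s.io/v1#g' mandatory.yaml

The final mandatory.yaml content is as follows: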

apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses/status
    verbs:
      - update

---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      serviceAccountName: nginx-ingress-serviceaccount
      containers:
        - name: nginx-ingress-controller
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.23.0
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            # www-data -> 33
            runAsUser: 33
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
            - name: https
              containerPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10

---
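With the apiVersions corrected, re-applying the manifest and watching the namespace is a quick sanity check (a small sketch using the object names from the manifest above):

kubectl apply -f mandatory.yaml
kubectl get pods -n ingress-nginx -w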

Troubleshooting: ingress-nginx deployment stuck in CrashLoopBackOff

Frankly this problem was a real trap: IPVS forwarding had not been enabled when the kubernetes cluster was deployed, which is what caused it (a kube-proxy sketch follows the describe output below).

OS version: CentOS 8.1
k8s version: v1.21.3
containerd version: ctr containerd.io 1.4.3
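Since the suspected cause is the proxy mode, it is worth confirming what mode kube-proxy is actually running in first (a hedged sketch; the ConfigMap name and label are the kubeadm defaults):

# In a kubeadm cluster the proxy mode lives in the kube-proxy ConfigMap (empty means iptables)
kubectl -n kube-system get configmap kube-proxy -o yaml | grep "mode:"

# The IPVS kernel modules should also be loaded on every node
lsmod | grep ip_vs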

After deployment the ingress controller pod sat in CrashLoopBackOff, restarting over and over:

[root@ck8s1 ingress]# kubectl get pod -n ingress-nginx
NAME                                        READY   STATUS             RESTARTS   AGE
default-http-backend-7b6d9847f6-crgs8       1/1     Running            0          46m
nginx-ingress-controller-7bbb744996-rd9d6   0/1     CrashLoopBackOff   19         46m
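Before going through the describe output, the previous container's logs are usually the quickest clue to why the pod keeps restarting (a sketch; substitute your actual pod name from the listing above):

kubectl logs -n ingress-nginx nginx-ingress-controller-7bbb744996-rd9d6 --previous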

kubectl describe shows the following:

[root@ck8s1 ingress]# kubectl describe pod nginx-ingress-controller-7bbb744996-rd9d6 -n ingress-nginx
Name:         nginx-ingress-controller-7bbb744996-rd9d6
Namespace:    ingress-nginx
Priority:     0
Node:         ck8s2/192.168.43.152
Start Time:   Sun, 15 Aug 2021 12:06:08 +0800
Labels:       app.kubernetes.io/name=ingress-nginx
              app.kubernetes.io/part-of=ingress-nginx
              pod-template-hash=7bbb744996
Annotations:  cni.projectcalico.org/containerID: f59aa02e0654127673fc968533873d70d3b172c6f9cea0f7cab027efc662ee46
              cni.projectcalico.org/podIP: 192.168.135.151/32
              cni.projectcalico.org/podIPs: 192.168.135.151/32
              prometheus.io/port: 10254
              prometheus.io/scrape: true
Status:       Running
IP:           192.168.135.151
IPs:
  IP:           192.168.135.151
Controlled By:  ReplicaSet/nginx-ingress-controller-7bbb744996
Containers:
  nginx-ingress-controller:
    Container ID:  containerd://f0f3c6cb2aafb5bc7474fc44c7b680aede4a481ef0b0c8826a71af88ffa0e663
    Image:         registry.aliyuncs.com/kubernetes/nginx-ingress-controller:0.20.0
    Image ID:      registry.aliyuncs.com/kubernetes/nginx-ingress-controller@sha256:3f06079f7727b2fb7ad5c97d8152eb622ae504674395dfa71fda7ce315aaaf30
    Ports:         80/TCP, 443/TCP
    Host Ports:    0/TCP, 0/TCP
    Args:
      /nginx-ingress-controller
      --default-backend-service=$(POD_NAMESPACE)/default-http-backend
      --configmap=$(POD_NAMESPACE)/nginx-configuration
      --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
      --udp-services-configmap=$(POD_NAMESPACE)/udp-services
      --publish-service=$(POD_NAMESPACE)/ingress-nginx
      --annotations-prefix=nginx.ingress.kubernetes.io
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       Error
      Exit Code:    143
      Started:      Sun, 15 Aug 2021 12:44:59 +0800
      Finished:     Sun, 15 Aug 2021 12:45:38 +0800
    Ready:          False
    Restart Count:  17
    Liveness:       http-get http://:10254/healthz delay=10s timeout=1s period=10s #success=1 #failure=3
    Readiness:      http-get http://:10254/healthz delay=0s timeout=1s period=10s #success=1 #failure=3
    Environment:
      POD_NAME:       nginx-ingress-controller-7bbb744996-rd9d6 (v1:metadata.name)
      POD_NAMESPACE:  ingress-nginx (v1:metadata.namespace)
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-mj82d (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  kube-api-access-mj82d:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                    From               Message
  ----     ------     ----                   ----               -------
  Normal   Scheduled  43m                    default-scheduler  Successfully assigned ingress-nginx/nginx-ingress-controller-7bbb744996-rd9d6 to ck8s2
  Normal   Created    42m (x2 over 43m)      kubelet            Created container nginx-ingress-controller
  Normal   Started    42m (x2 over 43m)      kubelet            Started container nginx-ingress-controller
  Normal   Pulled     41m (x3 over 43m)      kubelet            Container image "registry.aliyuncs.com/kubernetes/nginx-ingress-controller:0.20.0" already present on machine
  Warning  Unhealthy  41m (x10 over 43m)     kubelet            Readiness probe failed: Get "http://192.168.135.151:10254/healthz": dial tcp 192.168.135.151:10254: connect: connection refused
  Normal   Killing    41m (x2 over 42m)      kubelet            Container nginx-ingress-controller failed liveness probe, will be restarted
  Warning  Unhealthy  22m (x34 over 42m)     kubelet            Liveness probe failed: Get "http://192.168.135.151:10254/healthz": dial tcp 192.168.135.151:10254: connect: connection refused
  Warning  BackOff    2m56s (x147 over 40m)  kubelet            Back-off restarting failed container
[root@ck8s1 ingress]# 
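Since the root cause here was kube-proxy not running in IPVS mode, the sketch below shows one common way to switch it on a kubeadm-style cluster (module names, ConfigMap name and label are assumptions; verify them in your environment):

# Load the IPVS kernel modules on every node
modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack
lsmod | grep ip_vs

# Set mode: "ipvs" in the kube-proxy ConfigMap, then recreate the kube-proxy pods
kubectl -n kube-system edit configmap kube-proxy
kubectl -n kube-system delete pod -l k8s-app=kube-proxy

Once kube-proxy comes back in IPVS mode, the ingress-nginx pod should stop failing its probes.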

3. Exporting Images

Save the image to a tar archive:

docker save -o nginx-ingress-controller-0.30.0.tar quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0 

Compress the archive:

tar -czf nginx-ingress-controller-0.30.0.tar.gz nginx-ingress-controller-0.30.0.tar

Then copy the archives to the offline (intranet) environment, load them, and push the images to the internal image registry:

docker load -i  ingress-nginx-controller-v0.48.1.tar
docker load -i kube-webhook-certgen
docker tag  k8s.gcr.io/ingress-nginx/controller:v0.48.1 10.23.120.66/public/ingress-nginx-controller:v0.48.1
docker tag jettech/kube-webhook-certgen:v1.5.1 10.23.120.66/public/kube-webhook-certgen:v1.5.1
docker push 10.23.120.66/public/ingress-nginx-controller:v0.48.1
docker push 10.23.120.66/public/kube-webhook-certgen:v1.5.1
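A quick way to confirm the push succeeded is to pull the images back from a cluster node (a sketch using the same registry address and tags as above):

docker pull 10.23.120.66/public/ingress-nginx-controller:v0.48.1
docker pull 10.23.120.66/public/kube-webhook-certgen:v1.5.1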

Those who act often succeed; those who keep walking arrive.