
Practicing k8s with 炎炎盐 --- Kubernetes 1.16.10 binary high-availability cluster deployment: the master nodes



Now we move on to deploying the k8s components on the master nodes. Let's continue!

  • I. Deploy kube-apiserver

    cd /opt/k8s/work/
    wget https://dl.k8s.io/v1.16.10/kubernetes-server-linux-amd64.tar.gz
    tar -xzvf kubernetes-server-linux-amd64.tar.gz
    cd kubernetes
    tar -xzvf kubernetes-src.tar.gz
    cp -f server/bin/{apiextensions-apiserver,kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubeadm,kubectl,kubelet,mounter} /opt/k8s/bin/
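As a quick optional sanity check, the copied binaries should run and report the expected version, for example:

/opt/k8s/bin/kube-apiserver --version        ##expect: Kubernetes v1.16.10
/opt/k8s/bin/kubectl version --client --short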
  • II. Create certificates and related configuration files

1. Create the kubernetes certificate

cd /opt/k8s/work
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "10.13.33.29",
    "10.13.33.38",
    "10.13.33.40",
    "10.13.33.31",
    "10.13.33.170",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local."
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF
#Add all cluster node IPs and the VIP to the hosts list
#When adding entries, mind the trailing commas; a missing or extra comma will make the next step fail

cfssl gencert -ca=/opt/k8s/work/ca.pem       -ca-key=/opt/k8s/work/ca-key.pem       -config=/opt/k8s/work/ca-config.json       -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
ls kubernetes*pem
cp kubernetes*.pem /etc/kubernetes/cert/           ##distribute the certificates to all master nodes
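Optionally verify that all node IPs and the VIP ended up in the certificate's SAN list (a quick check, assuming openssl is installed):

openssl x509 -in kubernetes.pem -noout -text | grep -A1 "Subject Alternative Name"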

2. Create the encryption configuration file
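Note that ${ENCRYPTION_KEY} must already be set in the current shell before running the heredoc below, otherwise an empty key would be written. A minimal sketch, assuming a freshly generated random 32-byte key is acceptable:

ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)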

cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF

cp encryption-config.yaml /etc/kubernetes/           ##distribute to all master nodes

3. Create the audit policy file

cat > audit-policy.yaml <<EOF
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk, so drop them.
  - level: None
    resources:
      - group: ""
        resources:
          - endpoints
          - services
          - services/status
    users:
      - 'system:kube-proxy'
    verbs:
      - watch
  - level: None
    resources:
      - group: ""
        resources:
          - nodes
          - nodes/status
    userGroups:
      - 'system:nodes'
    verbs:
      - get
  - level: None
    namespaces:
      - kube-system
    resources:
      - group: ""
        resources:
          - endpoints
    users:
      - 'system:kube-controller-manager'
      - 'system:kube-scheduler'
      - 'system:serviceaccount:kube-system:endpoint-controller'
    verbs:
      - get
      - update
  - level: None
    resources:
      - group: ""
        resources:
          - namespaces
          - namespaces/status
          - namespaces/finalize
    users:
      - 'system:apiserver'
    verbs:
      - get
  # Don't log HPA fetching metrics.
  - level: None
    resources:
      - group: metrics.k8s.io
    users:
      - 'system:kube-controller-manager'
    verbs:
      - get
      - list
  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - '/healthz*'
      - /version
      - '/swagger*'
  # Don't log events requests.
  - level: None
    resources:
      - group: ""
        resources:
          - events
  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    users:
      - kubelet
      - 'system:node-problem-detector'
      - 'system:serviceaccount:kube-system:node-problem-detector'
    verbs:
      - update
      - patch
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    userGroups:
      - 'system:nodes'
    verbs:
      - update
      - patch
  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    omitStages:
      - RequestReceived
    users:
      - 'system:serviceaccount:kube-system:namespace-controller'
    verbs:
      - deletecollection
  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - secrets
          - configmaps
      - group: authentication.k8s.io
        resources:
          - tokenreviews
  # Get responses can be large; skip them.
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
    verbs:
      - get
      - list
      - watch
  # Default level for known APIs
  - level: RequestResponse
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - RequestReceived
EOF

cp audit-policy.yaml /etc/kubernetes/audit-policy.yaml    ##distribute to all master nodes

4. Create the aggregator (proxy-client) certificate signing request

cat > proxy-client-csr.json <<EOF
{
  "CN": "aggregator",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF

##generate the certificate and key
cfssl gencert -ca=/etc/kubernetes/cert/ca.pem   -ca-key=/etc/kubernetes/cert/ca-key.pem    -config=/etc/kubernetes/cert/ca-config.json    -profile=kubernetes proxy-client-csr.json | cfssljson -bare proxy-client

ls proxy-client*.pem

cp proxy-client*.pem /etc/kubernetes/cert/         ##distribute to all master nodes
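Optionally confirm that the certificate's CN is "aggregator"; it must match the --requestheader-allowed-names value in the kube-apiserver unit below (a quick check, assuming openssl is installed):

openssl x509 -in proxy-client.pem -noout -subject        ##the subject should contain CN=aggregator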
  • III. Start kube-apiserver

1. Create kube-apiserver.service
cat > /etc/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/data/k8s/k8s/kube-apiserver
ExecStart=/opt/k8s/bin/kube-apiserver \
  --advertise-address=10.13.33.38   --bind-address=10.13.33.38   --secure-port=6443   --insecure-bind-address=127.0.0.1   --insecure-port=0 \
  --enable-admission-plugins=MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --default-not-ready-toleration-seconds=360   --default-unreachable-toleration-seconds=360   --feature-gates=DynamicAuditing=true \
  --max-mutating-requests-inflight=2000   --max-requests-inflight=4000   --default-watch-cache-size=200   --delete-collection-workers=2 \
  --encryption-provider-config=/etc/kubernetes/encryption-config.yaml \
  --etcd-cafile=/etc/kubernetes/cert/ca.pem   --etcd-certfile=/etc/kubernetes/cert/kubernetes.pem   --etcd-keyfile=/etc/kubernetes/cert/kubernetes-key.pem \
  --etcd-servers=https://10.13.33.38:2379,https://10.13.33.29:2379,https://10.13.33.40:2379 \
  --tls-cert-file=/etc/kubernetes/cert/kubernetes.pem   --tls-private-key-file=/etc/kubernetes/cert/kubernetes-key.pem \
  --audit-dynamic-configuration   --audit-log-maxage=15   --audit-log-maxbackup=3   --audit-log-maxsize=100   --audit-log-truncate-enabled \
  --audit-log-path=/data/k8s/k8s/kube-apiserver/audit.log   --audit-policy-file=/etc/kubernetes/audit-policy.yaml \
  --profiling   --anonymous-auth=false   --client-ca-file=/etc/kubernetes/cert/ca.pem   --enable-bootstrap-token-auth \
  --requestheader-allowed-names="aggregator"   --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem   --requestheader-extra-headers-prefix="X-Remote-Extra-"   --requestheader-group-headers=X-Remote-Group   --requestheader-username-headers=X-Remote-User \
  --service-account-key-file=/etc/kubernetes/cert/ca.pem   --authorization-mode=Node,RBAC   --runtime-config=api/all=true   --allow-privileged=true \
  --apiserver-count=3   --event-ttl=168h \
  --kubelet-certificate-authority=/etc/kubernetes/cert/ca.pem   --kubelet-client-certificate=/etc/kubernetes/cert/kubernetes.pem   --kubelet-client-key=/etc/kubernetes/cert/kubernetes-key.pem   --kubelet-https=true   --kubelet-timeout=10s \
  --proxy-client-cert-file=/etc/kubernetes/cert/proxy-client.pem   --proxy-client-key-file=/etc/kubernetes/cert/proxy-client-key.pem \
  --service-cluster-ip-range=10.254.0.0/16   --service-node-port-range=30000-40000 \
  --logtostderr=false   --v=2   --log-dir=/data/k8s/k8s/kube-apiserver   --basic-auth-file=/etc/kubernetes/basic_auth_file
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
##--advertise-address and --bind-address: change to this node's own IP
##--etcd-servers: the etcd node endpoints
##--service-cluster-ip-range: the cluster service IP subnet
##--service-node-port-range: the NodePort range to use
##distribute to all master nodes
cp kube-apiserver.service /etc/systemd/system/kube-apiserver.service
##create the working directory
mkdir -p /data/k8s/k8s/kube-apiserver
##start kube-apiserver
systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver
##check the ports kube-apiserver listens on
netstat -lntup|grep kube
##make sure the unit is active (running); otherwise inspect the logs to find the cause
systemctl status kube-apiserver
journalctl -fu kube-apiserver
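As an optional check, /healthz on the secure port can be probed directly. Because --anonymous-auth=false is set, an unauthenticated request should come back as 401 Unauthorized, which still confirms the apiserver is up and serving TLS on 6443 (replace the IP with this node's own):

curl -k https://10.13.33.38:6443/healthz        ##expect an HTTP 401 Unauthorized JSON response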
  • IV. Deploy kube-controller-manager

1. Create certificates

cd /opt/k8s/work

cat > kube-controller-manager-csr.json <<EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "10.13.33.29",
      "10.13.33.38",
      "10.13.33.40"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "system:kube-controller-manager",
        "OU": "4Paradigm"
      }
    ]
}
EOF

###The IP addresses here are the master node IPs

##generate the certificates
cfssl gencert -ca=/opt/k8s/work/ca.pem   -ca-key=/opt/k8s/work/ca-key.pem   -config=/opt/k8s/work/ca-config.json   -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
ls kube-controller-manager*pem

cp kube-controller-manager*.pem /etc/kubernetes/cert/        ##distribute to all master nodes

2. Create and distribute the kubeconfig file

##10.13.33.170:8443 is the VIP (the highly available apiserver endpoint)
kubectl config set-cluster kubernetes   --certificate-authority=/opt/k8s/work/ca.pem   --embed-certs=true   --server=https://10.13.33.170:8443   --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager   --client-certificate=kube-controller-manager.pem   --client-key=kube-controller-manager-key.pem   --embed-certs=true   --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager   --cluster=kubernetes   --user=system:kube-controller-manager   --kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

cp kube-controller-manager.kubeconfig /etc/kubernetes/                       ##distribute to all master nodes
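As a quick optional check before distribution, view the cluster, user, and context entries embedded in the kubeconfig:

kubectl config view --kubeconfig=kube-controller-manager.kubeconfig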

3. Start kube-controller-manager

##create the kube-controller-manager unit file
cat > /etc/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
WorkingDirectory=/data/k8s/k8s/kube-controller-manager
ExecStart=/opt/k8s/bin/kube-controller-manager \
  --profiling   --cluster-name=kubernetes   --controllers=*,bootstrapsigner,tokencleaner   --kube-api-qps=1000   --kube-api-burst=2000 \
  --leader-elect   --use-service-account-credentials   --concurrent-service-syncs=2   --bind-address=0.0.0.0 \
  --tls-cert-file=/etc/kubernetes/cert/kube-controller-manager.pem   --tls-private-key-file=/etc/kubernetes/cert/kube-controller-manager-key.pem \
  --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig   --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig   --client-ca-file=/etc/kubernetes/cert/ca.pem \
  --requestheader-allowed-names=""   --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem   --requestheader-extra-headers-prefix="X-Remote-Extra-"   --requestheader-group-headers=X-Remote-Group   --requestheader-username-headers=X-Remote-User \
  --cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem   --cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem   --experimental-cluster-signing-duration=876000h \
  --horizontal-pod-autoscaler-sync-period=10s   --concurrent-deployment-syncs=10   --concurrent-gc-syncs=30   --node-cidr-mask-size=24 \
  --service-cluster-ip-range=10.254.0.0/16   --pod-eviction-timeout=6m   --terminated-pod-gc-threshold=10000 \
  --root-ca-file=/etc/kubernetes/cert/ca.pem   --service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig   --logtostderr=true   --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
##--secure-port and --port are left at their defaults (10257 and 10252)

mkdir -p /data/k8s/k8s/kube-controller-manager

##start kube-controller-manager
systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl restart kube-controller-manager

systemctl status kube-controller-manager
netstat -lnpt | grep kube-cont
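Two further optional checks, assuming kube-controller-manager's insecure port was left at its default (10252) and an admin kubeconfig is already configured for kubectl: probe the local health endpoint and inspect the leader-election record to see which master currently holds the lease:

curl -s http://127.0.0.1:10252/healthz        ##expect: ok
kubectl get endpoints kube-controller-manager -n kube-system -o yaml | grep control-plane.alpha.kubernetes.io/leader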


Original article: https://blog.51cto.com/13534471/2507995
