
Deploying the Kubernetes Master Services



I. Initialize the servers

1. Disable the firewall

[Run on all hosts]

[root@k8s-master1 ~]# systemctl stop firewalld
[root@k8s-master1 ~]# systemctl disable firewalld
2. Disable SELinux

[Run on all hosts]

# setenforce 0
# vim /etc/selinux/config
Change SELINUX=enforcing to SELINUX=disabled
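
To confirm the change, getenforce should report Permissive (right after setenforce 0, until the next reboot) or Disabled:

# getenforce
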
3. Set the hostname

[Run on all hosts]
# hostnamectl set-hostname <hostname>

4. Configure name resolution

[Run on all hosts]

# vim /etc/hosts

Add the following four lines:

192.168.31.63    k8s-master1
192.168.31.64    k8s-master2
192.168.31.65    k8s-node1
192.168.31.66    k8s-node2
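
If root SSH access between the machines is already set up, the same hosts file can be pushed to the other three machines in one line (a convenience sketch, not part of the original package; IPs are used because the hostnames are not resolvable yet on the remote hosts):

# for ip in 192.168.31.64 192.168.31.65 192.168.31.66; do scp /etc/hosts root@$ip:/etc/hosts; done
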
5. Configure time synchronization

Pick one node to act as the time server and configure the rest as clients:
    k8s-master1 is the server side of the time service;
    the other machines are its clients.

1) Configure k8s-master1

# yum install chrony -y
# vim /etc/chrony.conf
    # change these three settings
    server 127.127.1.0 iburst
    allow 192.168.31.0/24
    local stratum 10

# systemctl start chronyd
# systemctl enable chronyd
# ss -unl | grep 123
        UNCONN     0      0            *:123                      *:*   
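
Once the clients below are configured, you can also confirm from k8s-master1 that they are checking in; chronyc clients must be run as root on the server:

# chronyc clients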

2) Configure k8s-node1 and k8s-node2

# yum install chrony -y
# vim /etc/chrony.conf
            server 192.168.31.63 iburst

# systemctl start chronyd
# systemctl enable chronyd
# chronyc sources

            210 Number of sources = 1
            MS Name/IP address         Stratum Poll Reach LastRx Last sample
            ===============================================================================
            ^* k8s-master1                   10   6    17     4    +11us[  +79us] +/-   95us

6. Disable swap
[Run on all hosts]

[root@k8s-master1 ~]# swapoff -a
[root@k8s-master1 ~]# vim /etc/fstab

Delete (or comment out) the line that mounts the swap device.
Check that swap is now disabled:

[root@k8s-master1 ~]# free -m
                  total        used        free      shared  buff/cache   available
    Mem:           2827         157        2288           9         380        2514
    Swap:             0           0           0
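
If you prefer a non-interactive edit over vim, the swap entry in /etc/fstab can be commented out with sed (this assumes the entry contains the word "swap"; keep a backup of the file first):

# cp /etc/fstab /etc/fstab.bak
# sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab
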

II. Issue certificates for etcd

1) Create a certificate authority (CA)
2) Fill in the certificate signing request, listing the IPs of the nodes that will run etcd
3) Request a certificate from the CA

    Step 1: upload the TLS package
        Copy it to /root
        (details omitted)
    Step 2:
        # tar xvf /root/TLS.tar.gz
        # cd /root/TLS
        # vim server-csr.json 
            Update the IP addresses in "hosts"; they must be the IPs of the nodes that will run etcd
            {
                "CN": "etcd",
                "hosts": [
                    "192.168.31.63",
                    "192.168.31.65",
                    "192.168.31.66"
                    ],
                "key": {
                    "algo": "rsa",
                    "size": 2048
                },
                "names": [
                    {
                        "C": "CN",
                        "L": "BeiJing",
                        "ST": "BeiJing"
                    }
                ]
            }
        # ./generate_etcd_cert.sh
        # ls *pem
            ca-key.pem  ca.pem  server-key.pem  server.pem
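
    generate_etcd_cert.sh is shipped in the TLS package and is not reproduced here. Scripts of this kind typically wrap cfssl/cfssljson roughly as follows; the file names ca-csr.json and ca-config.json and the profile name "www" are assumptions, so check the script itself:

        # cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
        # cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

    Whichever way the script works, it is worth confirming that the issued certificate really lists the etcd node IPs:

        # openssl x509 -in server.pem -noout -text | grep -A1 "Subject Alternative Name"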

III. Deploy etcd

etcd needs three machines.
Install one etcd member on each of master1, node1, and node2.

Note:
    Extracting the tarball produces one file and one directory.

    # tar xvf etcd.tar.gz 
    # mv etcd.service /usr/lib/systemd/system
    # vim /opt/etcd/cfg/etcd.conf
        #[Member]
        ETCD_NAME="etcd-1"
        ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
        ETCD_LISTEN_PEER_URLS="https://192.168.31.63:2380"
        ETCD_LISTEN_CLIENT_URLS="https://192.168.31.63:2379"

        #[Clustering]
        ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.63:2380"
        ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.63:2379"
        ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.31.63:2380,etcd-2=https://192.168.31.65:2380,etcd-3=https://192.168.31.66:2380"
        ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
        ETCD_INITIAL_CLUSTER_STATE="new"
    # rm -rf /opt/etcd/ssl/*
    # \cp -fv ca.pem server.pem server-key.pem /opt/etcd/ssl/ 

    Copy the etcd unit file and the etcd program directory to node1 and node2:
    # scp /usr/lib/systemd/system/etcd.service root@k8s-node1:/usr/lib/systemd/system/
    # scp /usr/lib/systemd/system/etcd.service root@k8s-node2:/usr/lib/systemd/system/
    # scp -r /opt/etcd/ root@k8s-node2:/opt/
    # scp -r /opt/etcd/ root@k8s-node1:/opt/

    On node1, edit the etcd configuration file:
    # vim /opt/etcd/cfg/etcd.conf
    #[Member]
    ETCD_NAME="etcd-2"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.31.65:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.31.65:2379"

    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.65:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.65:2379"
    ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.31.63:2380,etcd-2=https://192.168.31.65:2380,etcd-3=https://192.168.31.66:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"

    On node2, edit the etcd configuration file:
    # vim /opt/etcd/cfg/etcd.conf
    #[Member]
    ETCD_NAME="etcd-3"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.31.66:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.31.66:2379"

    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.66:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.66:2379"
    ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.31.63:2380,etcd-2=https://192.168.31.65:2380,etcd-3=https://192.168.31.66:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    Start the etcd service on all three nodes, one after another (the first member may appear to hang until its peers come up):
    # systemctl start etcd
    # systemctl enable etcd
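
    Because a new unit file was just copied into /usr/lib/systemd/system, systemd may need to re-read its configuration before the first start; if systemctl start etcd reports that the unit is not found, or a node fails to start, run:

    # systemctl daemon-reload
    # systemctl status etcd -l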

    Check that the cluster started successfully and is healthy:
    # /opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.31.63:2379,https://192.168.31.65:2379,https://192.168.31.66:2379" cluster-health
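
    The command above uses the etcd v2 API flags. If the bundled etcdctl also speaks the v3 API, an equivalent health check looks roughly like this (note the different flag names):

    # ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.31.63:2379,https://192.168.31.65:2379,https://192.168.31.66:2379" endpoint health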

IV. Sign certificates for the API server

# cd /root/TLS/k8s/
# ./generate_k8s_cert.sh
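
generate_k8s_cert.sh reads a server-csr.json in the same directory; before running it, make sure its hosts list covers every address the API server will be reached at. The exact file ships with the package, but for this topology a plausible hosts fragment would look like the sketch below (an assumption, not the packaged file): 10.0.0.1 is the first IP of the --service-cluster-ip-range configured in the next section, and both master IPs are included because a second master exists:

    "hosts": [
        "10.0.0.1",
        "127.0.0.1",
        "192.168.31.63",
        "192.168.31.64",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster.local"
    ]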

V. Deploy the master services

# tar xvf k8s-master.tar.gz 
# mv kube-apiserver.service kube-controller-manager.service kube-scheduler.service /usr/lib/systemd/system/
# mv kubernetes /opt/
# cp /root/TLS/k8s/{ca*pem,server.pem,server-key.pem} /opt/kubernetes/ssl/ -rvf
Edit the kube-apiserver configuration file:
# vim /opt/kubernetes/cfg/kube-apiserver.conf 
    KUBE_APISERVER_OPTS="--logtostderr=false \
    --v=2 \
    --log-dir=/opt/kubernetes/logs \
    --etcd-servers=https://192.168.31.63:2379,https://192.168.31.65:2379,https://192.168.31.66:2379 \
    --bind-address=192.168.31.63 \
    --secure-port=6443 \
    --advertise-address=192.168.31.63 \
    --allow-privileged=true \
    --service-cluster-ip-range=10.0.0.0/24 \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
    --authorization-mode=RBAC,Node \
    --enable-bootstrap-token-auth=true \
    --token-auth-file=/opt/kubernetes/cfg/token.csv \
    --service-node-port-range=30000-32767 \
    --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
    --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
    --tls-cert-file=/opt/kubernetes/ssl/server.pem \
    --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
    --client-ca-file=/opt/kubernetes/ssl/ca.pem \
    --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
    --etcd-cafile=/opt/etcd/ssl/ca.pem \
    --etcd-certfile=/opt/etcd/ssl/server.pem \
    --etcd-keyfile=/opt/etcd/ssl/server-key.pem \
    --audit-log-maxage=30 \
    --audit-log-maxbackup=3 \
    --audit-log-maxsize=100 \
    --audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
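
--token-auth-file points at /opt/kubernetes/cfg/token.csv, which the k8s-master package normally already contains. If it is missing, a bootstrap token file can be created along these lines (a sketch; the format is token,user,uid,"group", and the exact group name matters little here because the cluster role binding created at the end of this section binds the kubelet-bootstrap user directly):

    # head -c 16 /dev/urandom | od -An -t x | tr -d ' '
    # vim /opt/kubernetes/cfg/token.csv
        <token-from-previous-command>,kubelet-bootstrap,10001,"system:node-bootstrapper"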

Start the master components:
    # systemctl start kube-apiserver
    # systemctl start kube-controller-manager
    # systemctl start kube-scheduler
    # systemctl enable kube-apiserver
    # systemctl enable kube-controller-manager
    # systemctl enable kube-scheduler
    # cp /opt/kubernetes/bin/kubectl /bin/
Check the result:
    # ps aux | grep kube
    # ps aux | grep kube | wc -l

    # kubectl get cs
    NAME                 AGE
    controller-manager   <unknown>
    scheduler            <unknown>
    etcd-1               <unknown>
    etcd-2               <unknown>
    etcd-0               <unknown>
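
    The <unknown> values under AGE are a display quirk of kubectl get cs on some releases (1.16.x in particular), not a failure. To see the actual health of each component, request the full objects:

    # kubectl get cs -o yaml | grep -E "name:|message:"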

Configure TLS bootstrapping so kubelet client certificates can be issued automatically:
    # kubectl create clusterrolebinding kubelet-bootstrap \
        --clusterrole=system:node-bootstrapper \
        --user=kubelet-bootstrap
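
    The binding can be checked before moving on to the worker-node setup:

    # kubectl describe clusterrolebinding kubelet-bootstrap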

Original source: https://blog.51cto.com/11742478/2484941