
Ceph 13.2.8 Three-Node Deployment



Host           eth0 (public)  eth1 (cluster)  Roles                  Spec  Data disks
bs-k8s-ceph    20.0.0.208     10.0.0.208      mon, osd, mgr, deploy  2C2G  sdb, sdc, sdd (20G each)
bs-hk-hk01     20.0.0.206     10.0.0.206      mon, osd, mgr          2C2G  sdb, sdc, sdd (20G each)
bs-hk-hk02     20.0.0.207     10.0.0.207      mon, osd, mgr          2C2G  sdb, sdc, sdd (20G each)
bs-k8s-node01  20.0.0.203     10.0.0.203      client                 2C2G  sdb, sdc, sdd (20G each)
bs-k8s-node02  20.0.0.204     10.0.0.204      client                 2C2G  sdb, sdc, sdd (20G each)
bs-k8s-node03  20.0.0.205     10.0.0.205      client                 2C2G  sdb, sdc, sdd (20G each)

Note: commands shown at a bare # prompt are run on all of bs-k8s-ceph, bs-hk-hk01 and bs-hk-hk02; commands at a [root@host ...]# prompt are run only on that host.
# uname -a
Linux bs-k8s-ceph 4.4.186-1.el7.elrepo.x86_64 #1 SMP Sun Jul 21 04:06:52 EDT 2019 x86_64 x86_64 x86_64 GNU/Linux
# hostname -I
20.0.0.208 10.0.0.208 
# ls /dev/sd*
/dev/sda  /dev/sda1  /dev/sda2  /dev/sda3  /dev/sdb  /dev/sdc  /dev/sdd
# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:firewalld(1)
# getenforce 
Disabled
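
For reference, the firewall and SELinux state shown above can be reproduced on a fresh CentOS 7 host with a few standard commands; a minimal sketch, run on every node:

systemctl disable --now firewalld                                    # stop the firewall and keep it off at boot
setenforce 0                                                         # SELinux permissive for the running system
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config  # persist across reboots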

Time synchronization: a two-tier scheme, external to the public NTP pool plus internal between the nodes
External sync
# yum install -y ntpdate
# ntpdate cn.pool.ntp.org
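
ntpdate is a one-shot sync. To keep the clock from drifting against the public pool between boots, a periodic cron entry is a common companion; a hypothetical example, not part of the original setup:

echo '*/30 * * * * /usr/sbin/ntpdate cn.pool.ntp.org >/dev/null 2>&1' >> /var/spool/cron/root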

Internal sync (bs-k8s-ceph serves time to the other two nodes via chrony)
# yum -y install chrony
[root@bs-k8s-ceph ~]# cp /etc/chrony.conf{,.bak}
[root@bs-k8s-ceph ~]# vim /etc/chrony.conf
[root@bs-k8s-ceph ~]# diff /etc/chrony.conf{,.bak}
26c26
< allow 10.0.0.0/24
---
> #allow 192.168.0.0/16
[root@bs-hk-hk01 ~]# cp /etc/chrony.conf{,.bak}
[root@bs-hk-hk01 ~]# vim /etc/chrony.conf
[root@bs-hk-hk01 ~]# diff /etc/chrony.conf{,.bak}
3c3,6
< server 10.0.0.208 iburst
---
> server 0.centos.pool.ntp.org iburst
> server 1.centos.pool.ntp.org iburst
> server 2.centos.pool.ntp.org iburst
> server 3.centos.pool.ntp.org iburst
[root@bs-hk-hk01 ~]# scp /etc/chrony.conf 20.0.0.207:/etc/chrony.conf
# systemctl restart chronyd.service
# chronyc sources -v
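
The restart applies the new config immediately, but chronyd should also be enabled so the hierarchy survives reboots. A quick verification sketch; in chronyc sources, the line marked ^* is the selected, synchronized source (on bs-hk-hk01/bs-hk-hk02 it should be 10.0.0.208):

systemctl enable chronyd        # on all three nodes
chronyc tracking                # local clock offset and stratum
chronyc sources | grep '^\^\*'  # show the currently selected source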

SSH key trust (passwordless root login between the nodes)
[root@bs-k8s-ceph ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.206  bs-hk-hk01
10.0.0.207  bs-hk-hk02
10.0.0.208  bs-k8s-ceph
[root@bs-k8s-ceph ~]# mkdir /service/scripts -p
[root@bs-k8s-ceph ~]# vim /service/scripts/ssh-key.sh
[root@bs-k8s-ceph ~]# cat /service/scripts/ssh-key.sh
#!/bin/bash
##########################################################################
#Author:                     zisefeizhu
#QQ:                         2********0
#Date:                       2020-02-11
#FileName:                   /service/scripts/ssh-key.sh
#URL:                        https://www.cnblogs.com/zisefeizhu/
#Description:                The test script
#Copyright (C):              2020 All rights reserved
##########################################################################
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin
export PATH
# Target host list (each host listed by IP and by hostname, so both names get trusted)
IP="
10.0.0.206
bs-hk-hk01
10.0.0.207
bs-hk-hk02
10.0.0.208
bs-k8s-ceph
"
for node in ${IP};do
  sshpass -p 1 ssh-copy-id ${node} -o StrictHostKeyChecking=no   # push the public key; root password is "1"
  scp /etc/hosts ${node}:/etc/hosts                              # sync the hosts file as well
  if [ $? -eq 0 ];then                                           # note: $? tests the scp, not ssh-copy-id
    echo "${node} key copy complete"
  else
    echo "${node} key copy failed"
  fi
done
[root@bs-k8s-ceph ~]# yum install -y sshpass
[root@bs-k8s-ceph ~]# ssh-keygen -t rsa
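
With sshpass installed and the key pair generated, run the script once from the deploy node; the loop trusts every host by both IP and hostname. A usage sketch:

[root@bs-k8s-ceph ~]# sh /service/scripts/ssh-key.sh
[root@bs-k8s-ceph ~]# ssh bs-hk-hk01 hostname    # should print bs-hk-hk01 without a password prompt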

Prepare the data disks
# lsblk 
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   20G  0 disk 
├─sda1   8:1    0    1G  0 part /boot
├─sda2   8:2    0    1G  0 part [SWAP]
└─sda3   8:3    0   18G  0 part /
sdb      8:16   0   20G  0 disk 
sdc      8:32   0   20G  0 disk 
sdd      8:48   0   20G  0 disk 
sr0     11:0    1  918M  0 rom  
On each node, partition the journal disk into sdb1 and sdb2, one for each of that node's two OSDs (the journal is sized here at 25% of the OSD).
Create the partitions with the parted command
# parted /dev/sdb
GNU Parted 3.1
Using /dev/sdb
Welcome to GNU Parted! Type help to view a list of commands.
(parted) mklabel gpt
(parted) mkpart primary xfs  0% 50%                                       
(parted) mkpart primary xfs  50% 100%                                       
(parted) q                                                                
Information: You may need to update /etc/fstab.

# lsblk                                               
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
......
sdb      8:16   0   20G  0 disk 
├─sdb1   8:17   0   10G  0 part 
└─sdb2   8:18   0   10G  0 part 
......
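
The interactive session above covers one node. Since the disk layout is identical everywhere, the same partitioning can be scripted non-interactively over the SSH trust set up earlier; a sketch:

# -s runs parted in script mode; same 50/50 split of sdb on every node
for host in bs-k8s-ceph bs-hk-hk01 bs-hk-hk02; do
  ssh ${host} "parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 50% mkpart primary xfs 50% 100%"
done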

Configure the yum repository (all nodes)
# cat /etc/yum.repos.d/ceph.repo 
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
 
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
 
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
# yum clean all && yum makecache
# yum install snappy  leveldb gdisk python-argparse gperftools-libs  -y
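
The repo file only needs to be written once; it can then be pushed to the other two nodes before installing there. A sketch using the SSH trust from earlier:

for host in bs-hk-hk01 bs-hk-hk02; do
  scp /etc/yum.repos.d/ceph.repo ${host}:/etc/yum.repos.d/
  ssh ${host} "yum clean all && yum makecache"
done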
 
Deploy ceph-deploy (on bs-k8s-ceph)
[root@bs-k8s-ceph ~]# yum install -y ceph-deploy python-pip
[root@bs-k8s-ceph ~]# ceph-deploy --version
2.0.1

Create the working directory on the admin node and switch into it (subsequent commands run from /etc/ceph)
[root@bs-k8s-ceph ~]# mkdir /etc/ceph
[root@bs-k8s-ceph ~]# cd /etc/ceph
 
Create the three monitors
[root@bs-k8s-ceph ceph]# ceph-deploy new bs-k8s-ceph bs-hk-hk01 bs-hk-hk02
[root@bs-k8s-ceph ceph]# ls
ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring
[root@bs-k8s-ceph ceph]# cp ceph.conf ceph.conf-`date +%F`
[root@bs-k8s-ceph ceph]# diff ceph.conf ceph.conf-2020-02-11 
8,9d7
< public network = 20.0.0.0/24
< cluster network = 10.0.0.0/24
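
For context, the resulting ceph.conf is the stub generated by ceph-deploy new plus those two network lines; it should look roughly like this (fsid and monitor values taken from this cluster's output above, exact layout illustrative):

[global]
fsid = 11880418-1a9a-4b55-a353-4b141e2199d8
mon_initial_members = bs-k8s-ceph, bs-hk-hk01, bs-hk-hk02
mon_host = 10.0.0.208,10.0.0.206,10.0.0.207
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = 20.0.0.0/24
cluster network = 10.0.0.0/24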

Install Ceph (on all three nodes)
# yum install -y ceph ceph-radosgw

Initialize the monitors and gather the keys
[root@bs-k8s-ceph ceph]# ceph-deploy mon create-initial

Distribute the admin keyring to the other nodes
[root@bs-k8s-ceph ceph]# ceph-deploy admin bs-k8s-ceph bs-hk-hk01 bs-hk-hk02

Create the OSDs. ceph-deploy 2.0.1 builds BlueStore OSDs by default, so the --journal /dev/sdb1 flag in the commands below is accepted but has no effect (the logs show ceph-volume creating each OSD with --bluestore --data only).
[root@bs-k8s-ceph ceph]# ceph-deploy osd create bs-k8s-ceph --data /dev/sdc --journal /dev/sdb1
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create bs-k8s-ceph --data /dev/sdc --journal /dev/sdb1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fa1152d8680>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : /dev/sdb1
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : bs-k8s-ceph
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7fa1155228c0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdc
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
[bs-k8s-ceph][DEBUG ] connected to host: bs-k8s-ceph 
[bs-k8s-ceph][DEBUG ] detect platform information from remote host
[bs-k8s-ceph][DEBUG ] detect machine type
[bs-k8s-ceph][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to bs-k8s-ceph
[bs-k8s-ceph][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[bs-k8s-ceph][WARNIN] osd keyring does not exist yet, creating one
[bs-k8s-ceph][DEBUG ] create a keyring file
[bs-k8s-ceph][DEBUG ] find the location of an executable
[bs-k8s-ceph][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 37d3767e-4e21-448c-91f4-d3a1079bd8bd
[bs-k8s-ceph][WARNIN] Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-9f219cda-7297-4ea0-8b79-2899b68a94d9 /dev/sdc
[bs-k8s-ceph][WARNIN]  stdout: Physical volume "/dev/sdc" successfully created.
[bs-k8s-ceph][WARNIN]  stdout: Volume group "ceph-9f219cda-7297-4ea0-8b79-2899b68a94d9" successfully created
[bs-k8s-ceph][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-37d3767e-4e21-448c-91f4-d3a1079bd8bd ceph-9f219cda-7297-4ea0-8b79-2899b68a94d9
[bs-k8s-ceph][WARNIN]  stdout: Logical volume "osd-block-37d3767e-4e21-448c-91f4-d3a1079bd8bd" created.
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-k8s-ceph][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
[bs-k8s-ceph][WARNIN] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-0
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-9f219cda-7297-4ea0-8b79-2899b68a94d9/osd-block-37d3767e-4e21-448c-91f4-d3a1079bd8bd
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-0
[bs-k8s-ceph][WARNIN] Running command: /bin/ln -s /dev/ceph-9f219cda-7297-4ea0-8b79-2899b68a94d9/osd-block-37d3767e-4e21-448c-91f4-d3a1079bd8bd /var/lib/ceph/osd/ceph-0/block
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
[bs-k8s-ceph][WARNIN]  stderr: got monmap epoch 1
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQA4ckJerZKGGBAAK2c6hoeR8WkOAZMi3h7vjw==
[bs-k8s-ceph][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-0/keyring
[bs-k8s-ceph][WARNIN] added entity osd.0 auth auth(auid = 18446744073709551615 key=AQA4ckJerZKGGBAAK2c6hoeR8WkOAZMi3h7vjw== with 0 caps)
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 37d3767e-4e21-448c-91f4-d3a1079bd8bd --setuser ceph --setgroup ceph
[bs-k8s-ceph][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdc
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-9f219cda-7297-4ea0-8b79-2899b68a94d9/osd-block-37d3767e-4e21-448c-91f4-d3a1079bd8bd --path /var/lib/ceph/osd/ceph-0 --no-mon-config
[bs-k8s-ceph][WARNIN] Running command: /bin/ln -snf /dev/ceph-9f219cda-7297-4ea0-8b79-2899b68a94d9/osd-block-37d3767e-4e21-448c-91f4-d3a1079bd8bd /var/lib/ceph/osd/ceph-0/block
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-0
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
[bs-k8s-ceph][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-0-37d3767e-4e21-448c-91f4-d3a1079bd8bd
[bs-k8s-ceph][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-37d3767e-4e21-448c-91f4-d3a1079bd8bd.service to /usr/lib/systemd/system/ceph-volume@.service.
[bs-k8s-ceph][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@0
[bs-k8s-ceph][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service to /usr/lib/systemd/system/ceph-osd@.service.
[bs-k8s-ceph][WARNIN] Running command: /bin/systemctl start ceph-osd@0
[bs-k8s-ceph][WARNIN] --> ceph-volume lvm activate successful for osd ID: 0
[bs-k8s-ceph][WARNIN] --> ceph-volume lvm create successful for: /dev/sdc
[bs-k8s-ceph][INFO  ] checking OSD status...
[bs-k8s-ceph][DEBUG ] find the location of an executable
[bs-k8s-ceph][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host bs-k8s-ceph is now ready for osd use.
[root@bs-k8s-ceph ceph]# ceph-deploy osd create bs-k8s-ceph --data /dev/sdd --journal /dev/sdb1
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create bs-k8s-ceph --data /dev/sdd --journal /dev/sdb1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f342f4d0680>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : /dev/sdb1
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : bs-k8s-ceph
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7f342f71a8c0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdd
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdd
[bs-k8s-ceph][DEBUG ] connected to host: bs-k8s-ceph 
[bs-k8s-ceph][DEBUG ] detect platform information from remote host
[bs-k8s-ceph][DEBUG ] detect machine type
[bs-k8s-ceph][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to bs-k8s-ceph
[bs-k8s-ceph][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[bs-k8s-ceph][DEBUG ] find the location of an executable
[bs-k8s-ceph][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdd
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 0f718ba7-de6b-4d22-b74d-90657e220121
[bs-k8s-ceph][WARNIN] Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-b722303f-5fee-4425-84d1-057b1d05ca96 /dev/sdd
[bs-k8s-ceph][WARNIN]  stdout: Physical volume "/dev/sdd" successfully created.
[bs-k8s-ceph][WARNIN]  stdout: Volume group "ceph-b722303f-5fee-4425-84d1-057b1d05ca96" successfully created
[bs-k8s-ceph][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-0f718ba7-de6b-4d22-b74d-90657e220121 ceph-b722303f-5fee-4425-84d1-057b1d05ca96
[bs-k8s-ceph][WARNIN]  stdout: Logical volume "osd-block-0f718ba7-de6b-4d22-b74d-90657e220121" created.
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-k8s-ceph][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-1
[bs-k8s-ceph][WARNIN] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-1
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-b722303f-5fee-4425-84d1-057b1d05ca96/osd-block-0f718ba7-de6b-4d22-b74d-90657e220121
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[bs-k8s-ceph][WARNIN] Running command: /bin/ln -s /dev/ceph-b722303f-5fee-4425-84d1-057b1d05ca96/osd-block-0f718ba7-de6b-4d22-b74d-90657e220121 /var/lib/ceph/osd/ceph-1/block
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-1/activate.monmap
[bs-k8s-ceph][WARNIN]  stderr: got monmap epoch 1
[bs-k8s-ceph][WARNIN]  stderr: 
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-1/keyring --create-keyring --name osd.1 --add-key AQBMckJeIKP3FhAAqOLDpdF+kDfZu5cQC0zmww==
[bs-k8s-ceph][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-1/keyring
[bs-k8s-ceph][WARNIN] added entity osd.1 auth auth(auid = 18446744073709551615 key=AQBMckJeIKP3FhAAqOLDpdF+kDfZu5cQC0zmww== with 0 caps)
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/keyring
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 1 --monmap /var/lib/ceph/osd/ceph-1/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-1/ --osd-uuid 0f718ba7-de6b-4d22-b74d-90657e220121 --setuser ceph --setgroup ceph
[bs-k8s-ceph][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdd
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
[bs-k8s-ceph][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-b722303f-5fee-4425-84d1-057b1d05ca96/osd-block-0f718ba7-de6b-4d22-b74d-90657e220121 --path /var/lib/ceph/osd/ceph-1 --no-mon-config
[bs-k8s-ceph][WARNIN] Running command: /bin/ln -snf /dev/ceph-b722303f-5fee-4425-84d1-057b1d05ca96/osd-block-0f718ba7-de6b-4d22-b74d-90657e220121 /var/lib/ceph/osd/ceph-1/block
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[bs-k8s-ceph][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1
[bs-k8s-ceph][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-1-0f718ba7-de6b-4d22-b74d-90657e220121
[bs-k8s-ceph][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-1-0f718ba7-de6b-4d22-b74d-90657e220121.service to /usr/lib/systemd/system/ceph-volume@.service.
[bs-k8s-ceph][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@1
[bs-k8s-ceph][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@1.service to /usr/lib/systemd/system/ceph-osd@.service.
[bs-k8s-ceph][WARNIN] Running command: /bin/systemctl start ceph-osd@1
[bs-k8s-ceph][WARNIN] --> ceph-volume lvm activate successful for osd ID: 1
[bs-k8s-ceph][WARNIN] --> ceph-volume lvm create successful for: /dev/sdd
[bs-k8s-ceph][INFO  ] checking OSD status...
[bs-k8s-ceph][DEBUG ] find the location of an executable
[bs-k8s-ceph][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host bs-k8s-ceph is now ready for osd use.
[root@bs-k8s-ceph ceph]# ceph-deploy osd create bs-hk-hk01 --data /dev/sdc --journal /dev/sdb1
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create bs-hk-hk01 --data /dev/sdc --journal /dev/sdb1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fc87226c680>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : /dev/sdb1
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : bs-hk-hk01
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7fc8724b68c0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdc
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
[bs-hk-hk01][DEBUG ] connected to host: bs-hk-hk01 
[bs-hk-hk01][DEBUG ] detect platform information from remote host
[bs-hk-hk01][DEBUG ] detect machine type
[bs-hk-hk01][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to bs-hk-hk01
[bs-hk-hk01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[bs-hk-hk01][WARNIN] osd keyring does not exist yet, creating one
[bs-hk-hk01][DEBUG ] create a keyring file
[bs-hk-hk01][DEBUG ] find the location of an executable
[bs-hk-hk01][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
[bs-hk-hk01][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-hk-hk01][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 0f7e4e52-5e01-45bd-9ef6-55ec53db45e9
[bs-hk-hk01][WARNIN] Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-8ee7795f-ac6c-42a8-b930-44f06f4d19b6 /dev/sdc
[bs-hk-hk01][WARNIN]  stdout: Physical volume "/dev/sdc" successfully created.
[bs-hk-hk01][WARNIN]  stdout: Volume group "ceph-8ee7795f-ac6c-42a8-b930-44f06f4d19b6" successfully created
[bs-hk-hk01][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-0f7e4e52-5e01-45bd-9ef6-55ec53db45e9 ceph-8ee7795f-ac6c-42a8-b930-44f06f4d19b6
[bs-hk-hk01][WARNIN]  stdout: Logical volume "osd-block-0f7e4e52-5e01-45bd-9ef6-55ec53db45e9" created.
[bs-hk-hk01][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-hk-hk01][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2
[bs-hk-hk01][WARNIN] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-2
[bs-hk-hk01][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-8ee7795f-ac6c-42a8-b930-44f06f4d19b6/osd-block-0f7e4e52-5e01-45bd-9ef6-55ec53db45e9
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-0
[bs-hk-hk01][WARNIN] Running command: /bin/ln -s /dev/ceph-8ee7795f-ac6c-42a8-b930-44f06f4d19b6/osd-block-0f7e4e52-5e01-45bd-9ef6-55ec53db45e9 /var/lib/ceph/osd/ceph-2/block
[bs-hk-hk01][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap
[bs-hk-hk01][WARNIN]  stderr: got monmap epoch 1
[bs-hk-hk01][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-2/keyring --create-keyring --name osd.2 --add-key AQB0ckJeXwuZHRAA6nfA5V/kCZs9FjJYm6JOfw==
[bs-hk-hk01][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-2/keyring
[bs-hk-hk01][WARNIN] added entity osd.2 auth auth(auid = 18446744073709551615 key=AQB0ckJeXwuZHRAA6nfA5V/kCZs9FjJYm6JOfw== with 0 caps)
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/
[bs-hk-hk01][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid 0f7e4e52-5e01-45bd-9ef6-55ec53db45e9 --setuser ceph --setgroup ceph
[bs-hk-hk01][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdc
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
[bs-hk-hk01][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-8ee7795f-ac6c-42a8-b930-44f06f4d19b6/osd-block-0f7e4e52-5e01-45bd-9ef6-55ec53db45e9 --path /var/lib/ceph/osd/ceph-2 --no-mon-config
[bs-hk-hk01][WARNIN] Running command: /bin/ln -snf /dev/ceph-8ee7795f-ac6c-42a8-b930-44f06f4d19b6/osd-block-0f7e4e52-5e01-45bd-9ef6-55ec53db45e9 /var/lib/ceph/osd/ceph-2/block
[bs-hk-hk01][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-0
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2
[bs-hk-hk01][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-2-0f7e4e52-5e01-45bd-9ef6-55ec53db45e9
[bs-hk-hk01][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-2-0f7e4e52-5e01-45bd-9ef6-55ec53db45e9.service to /usr/lib/systemd/system/ceph-volume@.service.
[bs-hk-hk01][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@2
[bs-hk-hk01][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@2.service to /usr/lib/systemd/system/ceph-osd@.service.
[bs-hk-hk01][WARNIN] Running command: /bin/systemctl start ceph-osd@2
[bs-hk-hk01][WARNIN] --> ceph-volume lvm activate successful for osd ID: 2
[bs-hk-hk01][WARNIN] --> ceph-volume lvm create successful for: /dev/sdc
[bs-hk-hk01][INFO  ] checking OSD status...
[bs-hk-hk01][DEBUG ] find the location of an executable
[bs-hk-hk01][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host bs-hk-hk01 is now ready for osd use.
[root@bs-k8s-ceph ceph]# ceph-deploy osd create bs-hk-hk01 --data /dev/sdd --journal /dev/sdb1
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create bs-hk-hk01 --data /dev/sdd --journal /dev/sdb1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f90903ad680>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : /dev/sdb1
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : bs-hk-hk01
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7f90905f78c0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdd
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdd
[bs-hk-hk01][DEBUG ] connected to host: bs-hk-hk01 
[bs-hk-hk01][DEBUG ] detect platform information from remote host
[bs-hk-hk01][DEBUG ] detect machine type
[bs-hk-hk01][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to bs-hk-hk01
[bs-hk-hk01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[bs-hk-hk01][DEBUG ] find the location of an executable
[bs-hk-hk01][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdd
[bs-hk-hk01][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-hk-hk01][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 7d03f8ee-8ada-485e-a20b-8677a7e57c84
[bs-hk-hk01][WARNIN] Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-4d157a1d-e7e4-4c41-80c9-2596c35345ff /dev/sdd
[bs-hk-hk01][WARNIN]  stdout: Physical volume "/dev/sdd" successfully created.
[bs-hk-hk01][WARNIN]  stdout: Volume group "ceph-4d157a1d-e7e4-4c41-80c9-2596c35345ff" successfully created
[bs-hk-hk01][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-7d03f8ee-8ada-485e-a20b-8677a7e57c84 ceph-4d157a1d-e7e4-4c41-80c9-2596c35345ff
[bs-hk-hk01][WARNIN]  stdout: Logical volume "osd-block-7d03f8ee-8ada-485e-a20b-8677a7e57c84" created.
[bs-hk-hk01][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-hk-hk01][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3
[bs-hk-hk01][WARNIN] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-3
[bs-hk-hk01][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-4d157a1d-e7e4-4c41-80c9-2596c35345ff/osd-block-7d03f8ee-8ada-485e-a20b-8677a7e57c84
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[bs-hk-hk01][WARNIN] Running command: /bin/ln -s /dev/ceph-4d157a1d-e7e4-4c41-80c9-2596c35345ff/osd-block-7d03f8ee-8ada-485e-a20b-8677a7e57c84 /var/lib/ceph/osd/ceph-3/block
[bs-hk-hk01][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
[bs-hk-hk01][WARNIN]  stderr: got monmap epoch 1
[bs-hk-hk01][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-3/keyring --create-keyring --name osd.3 --add-key AQCCckJeTm5AMBAAQnCk+nGkKQifYDHOGVRy/w==
[bs-hk-hk01][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-3/keyring
[bs-hk-hk01][WARNIN] added entity osd.3 auth auth(auid = 18446744073709551615 key=AQCCckJeTm5AMBAAQnCk+nGkKQifYDHOGVRy/w== with 0 caps)
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/
[bs-hk-hk01][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid 7d03f8ee-8ada-485e-a20b-8677a7e57c84 --setuser ceph --setgroup ceph
[bs-hk-hk01][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdd
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
[bs-hk-hk01][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-4d157a1d-e7e4-4c41-80c9-2596c35345ff/osd-block-7d03f8ee-8ada-485e-a20b-8677a7e57c84 --path /var/lib/ceph/osd/ceph-3 --no-mon-config
[bs-hk-hk01][WARNIN] Running command: /bin/ln -snf /dev/ceph-4d157a1d-e7e4-4c41-80c9-2596c35345ff/osd-block-7d03f8ee-8ada-485e-a20b-8677a7e57c84 /var/lib/ceph/osd/ceph-3/block
[bs-hk-hk01][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[bs-hk-hk01][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
[bs-hk-hk01][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-3-7d03f8ee-8ada-485e-a20b-8677a7e57c84
[bs-hk-hk01][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-3-7d03f8ee-8ada-485e-a20b-8677a7e57c84.service to /usr/lib/systemd/system/ceph-volume@.service.
[bs-hk-hk01][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@3
[bs-hk-hk01][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service to /usr/lib/systemd/system/ceph-osd@.service.
[bs-hk-hk01][WARNIN] Running command: /bin/systemctl start ceph-osd@3
[bs-hk-hk01][WARNIN] --> ceph-volume lvm activate successful for osd ID: 3
[bs-hk-hk01][WARNIN] --> ceph-volume lvm create successful for: /dev/sdd
[bs-hk-hk01][INFO  ] checking OSD status...
[bs-hk-hk01][DEBUG ] find the location of an executable
[bs-hk-hk01][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host bs-hk-hk01 is now ready for osd use.
[root@bs-k8s-ceph ceph]# ceph-deploy osd create bs-hk-hk02 --data /dev/sdc --journal /dev/sdb1
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create bs-hk-hk02 --data /dev/sdc --journal /dev/sdb1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f90371d0680>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : /dev/sdb1
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : bs-hk-hk02
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7f903741a8c0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdc
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
[bs-hk-hk02][DEBUG ] connected to host: bs-hk-hk02 
[bs-hk-hk02][DEBUG ] detect platform information from remote host
[bs-hk-hk02][DEBUG ] detect machine type
[bs-hk-hk02][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to bs-hk-hk02
[bs-hk-hk02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[bs-hk-hk02][WARNIN] osd keyring does not exist yet, creating one
[bs-hk-hk02][DEBUG ] create a keyring file
[bs-hk-hk02][DEBUG ] find the location of an executable
[bs-hk-hk02][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
[bs-hk-hk02][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-hk-hk02][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 115e2aab-5560-489d-8a8b-d8d6f6351ce8
[bs-hk-hk02][WARNIN] Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-66b51d23-8490-4ecd-a82b-c552eb4e5767 /dev/sdc
[bs-hk-hk02][WARNIN]  stdout: Physical volume "/dev/sdc" successfully created.
[bs-hk-hk02][WARNIN]  stdout: Volume group "ceph-66b51d23-8490-4ecd-a82b-c552eb4e5767" successfully created
[bs-hk-hk02][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-115e2aab-5560-489d-8a8b-d8d6f6351ce8 ceph-66b51d23-8490-4ecd-a82b-c552eb4e5767
[bs-hk-hk02][WARNIN]  stdout: Logical volume "osd-block-115e2aab-5560-489d-8a8b-d8d6f6351ce8" created.
[bs-hk-hk02][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-hk-hk02][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4
[bs-hk-hk02][WARNIN] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-4
[bs-hk-hk02][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-66b51d23-8490-4ecd-a82b-c552eb4e5767/osd-block-115e2aab-5560-489d-8a8b-d8d6f6351ce8
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-0
[bs-hk-hk02][WARNIN] Running command: /bin/ln -s /dev/ceph-66b51d23-8490-4ecd-a82b-c552eb4e5767/osd-block-115e2aab-5560-489d-8a8b-d8d6f6351ce8 /var/lib/ceph/osd/ceph-4/block
[bs-hk-hk02][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap
[bs-hk-hk02][WARNIN]  stderr: got monmap epoch 1
[bs-hk-hk02][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-4/keyring --create-keyring --name osd.4 --add-key AQCcckJeNXjDMBAAOhS0lvDoLHjPaCAuylecsg==
[bs-hk-hk02][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-4/keyring
[bs-hk-hk02][WARNIN] added entity osd.4 auth auth(auid = 18446744073709551615 key=AQCcckJeNXjDMBAAOhS0lvDoLHjPaCAuylecsg== with 0 caps)
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/
[bs-hk-hk02][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid 115e2aab-5560-489d-8a8b-d8d6f6351ce8 --setuser ceph --setgroup ceph
[bs-hk-hk02][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdc
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4
[bs-hk-hk02][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-66b51d23-8490-4ecd-a82b-c552eb4e5767/osd-block-115e2aab-5560-489d-8a8b-d8d6f6351ce8 --path /var/lib/ceph/osd/ceph-4 --no-mon-config
[bs-hk-hk02][WARNIN] Running command: /bin/ln -snf /dev/ceph-66b51d23-8490-4ecd-a82b-c552eb4e5767/osd-block-115e2aab-5560-489d-8a8b-d8d6f6351ce8 /var/lib/ceph/osd/ceph-4/block
[bs-hk-hk02][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-0
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4
[bs-hk-hk02][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-4-115e2aab-5560-489d-8a8b-d8d6f6351ce8
[bs-hk-hk02][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-4-115e2aab-5560-489d-8a8b-d8d6f6351ce8.service to /usr/lib/systemd/system/ceph-volume@.service.
[bs-hk-hk02][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@4
[bs-hk-hk02][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@4.service to /usr/lib/systemd/system/ceph-osd@.service.
[bs-hk-hk02][WARNIN] Running command: /bin/systemctl start ceph-osd@4
[bs-hk-hk02][WARNIN] --> ceph-volume lvm activate successful for osd ID: 4
[bs-hk-hk02][WARNIN] --> ceph-volume lvm create successful for: /dev/sdc
[bs-hk-hk02][INFO  ] checking OSD status...
[bs-hk-hk02][DEBUG ] find the location of an executable
[bs-hk-hk02][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host bs-hk-hk02 is now ready for osd use.
[root@bs-k8s-ceph ceph]# ceph-deploy osd create bs-hk-hk02 --data /dev/sdd --journal /dev/sdb1
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create bs-hk-hk02 --data /dev/sdd --journal /dev/sdb1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7feaf4a7a680>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : /dev/sdb1
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : bs-hk-hk02
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7feaf4cc48c0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdd
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdd
[bs-hk-hk02][DEBUG ] connected to host: bs-hk-hk02 
[bs-hk-hk02][DEBUG ] detect platform information from remote host
[bs-hk-hk02][DEBUG ] detect machine type
[bs-hk-hk02][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to bs-hk-hk02
[bs-hk-hk02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[bs-hk-hk02][DEBUG ] find the location of an executable
[bs-hk-hk02][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdd
[bs-hk-hk02][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-hk-hk02][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 191064b8-92ae-4fa9-895a-671f3d134af2
[bs-hk-hk02][WARNIN] Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-1ff9394c-24bd-4f6c-9f0d-80d7e6a8c316 /dev/sdd
[bs-hk-hk02][WARNIN]  stdout: Physical volume "/dev/sdd" successfully created.
[bs-hk-hk02][WARNIN]  stdout: Volume group "ceph-1ff9394c-24bd-4f6c-9f0d-80d7e6a8c316" successfully created
[bs-hk-hk02][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-191064b8-92ae-4fa9-895a-671f3d134af2 ceph-1ff9394c-24bd-4f6c-9f0d-80d7e6a8c316
[bs-hk-hk02][WARNIN]  stdout: Logical volume "osd-block-191064b8-92ae-4fa9-895a-671f3d134af2" created.
[bs-hk-hk02][WARNIN] Running command: /bin/ceph-authtool --gen-print-key
[bs-hk-hk02][WARNIN] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-5
[bs-hk-hk02][WARNIN] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-5
[bs-hk-hk02][WARNIN] Running command: /bin/chown -h ceph:ceph /dev/ceph-1ff9394c-24bd-4f6c-9f0d-80d7e6a8c316/osd-block-191064b8-92ae-4fa9-895a-671f3d134af2
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[bs-hk-hk02][WARNIN] Running command: /bin/ln -s /dev/ceph-1ff9394c-24bd-4f6c-9f0d-80d7e6a8c316/osd-block-191064b8-92ae-4fa9-895a-671f3d134af2 /var/lib/ceph/osd/ceph-5/block
[bs-hk-hk02][WARNIN] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-5/activate.monmap
[bs-hk-hk02][WARNIN]  stderr: got monmap epoch 1
[bs-hk-hk02][WARNIN] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-5/keyring --create-keyring --name osd.5 --add-key AQCtckJeShPOBhAAdZ2Jd9o2oAIXBWmLPb4pBA==
[bs-hk-hk02][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-5/keyring
[bs-hk-hk02][WARNIN] added entity osd.5 auth auth(auid = 18446744073709551615 key=AQCtckJeShPOBhAAdZ2Jd9o2oAIXBWmLPb4pBA== with 0 caps)
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5/keyring
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5/
[bs-hk-hk02][WARNIN] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 5 --monmap /var/lib/ceph/osd/ceph-5/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-5/ --osd-uuid 191064b8-92ae-4fa9-895a-671f3d134af2 --setuser ceph --setgroup ceph
[bs-hk-hk02][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdd
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
[bs-hk-hk02][WARNIN] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-1ff9394c-24bd-4f6c-9f0d-80d7e6a8c316/osd-block-191064b8-92ae-4fa9-895a-671f3d134af2 --path /var/lib/ceph/osd/ceph-5 --no-mon-config
[bs-hk-hk02][WARNIN] Running command: /bin/ln -snf /dev/ceph-1ff9394c-24bd-4f6c-9f0d-80d7e6a8c316/osd-block-191064b8-92ae-4fa9-895a-671f3d134af2 /var/lib/ceph/osd/ceph-5/block
[bs-hk-hk02][WARNIN] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[bs-hk-hk02][WARNIN] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
[bs-hk-hk02][WARNIN] Running command: /bin/systemctl enable ceph-volume@lvm-5-191064b8-92ae-4fa9-895a-671f3d134af2
[bs-hk-hk02][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-5-191064b8-92ae-4fa9-895a-671f3d134af2.service to /usr/lib/systemd/system/ceph-volume@.service.
[bs-hk-hk02][WARNIN] Running command: /bin/systemctl enable --runtime ceph-osd@5
[bs-hk-hk02][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@5.service to /usr/lib/systemd/system/ceph-osd@.service.
[bs-hk-hk02][WARNIN] Running command: /bin/systemctl start ceph-osd@5
[bs-hk-hk02][WARNIN] --> ceph-volume lvm activate successful for osd ID: 5
[bs-hk-hk02][WARNIN] --> ceph-volume lvm create successful for: /dev/sdd
[bs-hk-hk02][INFO  ] checking OSD status...
[bs-hk-hk02][DEBUG ] find the location of an executable
[bs-hk-hk02][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host bs-hk-hk02 is now ready for osd use.
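All six invocations above are identical apart from host and device, so the whole OSD rollout can equally be driven from the deploy node by a loop; a sketch (BlueStore is the default, so --journal is simply dropped):

for host in bs-k8s-ceph bs-hk-hk01 bs-hk-hk02; do
  for dev in /dev/sdc /dev/sdd; do
    ceph-deploy osd create ${host} --data ${dev}
  done
done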
[root@bs-k8s-ceph ceph]# ceph -s
  cluster:
    id:     11880418-1a9a-4b55-a353-4b141e2199d8
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum bs-hk-hk01,bs-hk-hk02,bs-k8s-ceph
    mgr: no daemons active
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     
 
[root@bs-k8s-ceph ceph]# chmod +r ceph.client.admin.keyring
[root@bs-k8s-ceph ceph]# ceph-deploy mgr create bs-k8s-ceph bs-hk-hk01 bs-hk-hk02
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy mgr create bs-k8s-ceph bs-hk-hk01 bs-hk-hk02
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  mgr                           : [(bs-k8s-ceph, bs-k8s-ceph), (bs-hk-hk01, bs-hk-hk01), (bs-hk-hk02, bs-hk-hk02)]
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f1bc292c908>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mgr at 0x7f1bc321c140>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.mgr][DEBUG ] Deploying mgr, cluster ceph hosts bs-k8s-ceph:bs-k8s-ceph bs-hk-hk01:bs-hk-hk01 bs-hk-hk02:bs-hk-hk02
[bs-k8s-ceph][DEBUG ] connected to host: bs-k8s-ceph 
[bs-k8s-ceph][DEBUG ] detect platform information from remote host
[bs-k8s-ceph][DEBUG ] detect machine type
[ceph_deploy.mgr][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.mgr][DEBUG ] remote host will use systemd
[ceph_deploy.mgr][DEBUG ] deploying mgr bootstrap to bs-k8s-ceph
[bs-k8s-ceph][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[bs-k8s-ceph][WARNIN] mgr keyring does not exist yet, creating one
[bs-k8s-ceph][DEBUG ] create a keyring file
[bs-k8s-ceph][DEBUG ] create path recursively if it doesnt exist
[bs-k8s-ceph][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.bs-k8s-ceph mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-bs-k8s-ceph/keyring
[bs-k8s-ceph][INFO  ] Running command: systemctl enable ceph-mgr@bs-k8s-ceph
[bs-k8s-ceph][WARNIN] Created symlink from /etc/systemd/system/ceph-mgr.target.wants/ceph-mgr@bs-k8s-ceph.service to /usr/lib/systemd/system/ceph-mgr@.service.
[bs-k8s-ceph][INFO  ] Running command: systemctl start ceph-mgr@bs-k8s-ceph
[bs-k8s-ceph][INFO  ] Running command: systemctl enable ceph.target
[bs-hk-hk01][DEBUG ] connected to host: bs-hk-hk01 
[bs-hk-hk01][DEBUG ] detect platform information from remote host
[bs-hk-hk01][DEBUG ] detect machine type
[ceph_deploy.mgr][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.mgr][DEBUG ] remote host will use systemd
[ceph_deploy.mgr][DEBUG ] deploying mgr bootstrap to bs-hk-hk01
[bs-hk-hk01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[bs-hk-hk01][WARNIN] mgr keyring does not exist yet, creating one
[bs-hk-hk01][DEBUG ] create a keyring file
[bs-hk-hk01][DEBUG ] create path recursively if it doesnt exist
[bs-hk-hk01][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.bs-hk-hk01 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-bs-hk-hk01/keyring
[bs-hk-hk01][INFO  ] Running command: systemctl enable ceph-mgr@bs-hk-hk01
[bs-hk-hk01][WARNIN] Created symlink from /etc/systemd/system/ceph-mgr.target.wants/ceph-mgr@bs-hk-hk01.service to /usr/lib/systemd/system/ceph-mgr@.service.
[bs-hk-hk01][INFO  ] Running command: systemctl start ceph-mgr@bs-hk-hk01
[bs-hk-hk01][INFO  ] Running command: systemctl enable ceph.target
[bs-hk-hk02][DEBUG ] connected to host: bs-hk-hk02 
[bs-hk-hk02][DEBUG ] detect platform information from remote host
[bs-hk-hk02][DEBUG ] detect machine type
[ceph_deploy.mgr][INFO  ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.mgr][DEBUG ] remote host will use systemd
[ceph_deploy.mgr][DEBUG ] deploying mgr bootstrap to bs-hk-hk02
[bs-hk-hk02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[bs-hk-hk02][WARNIN] mgr keyring does not exist yet, creating one
[bs-hk-hk02][DEBUG ] create a keyring file
[bs-hk-hk02][DEBUG ] create path recursively if it doesnt exist
[bs-hk-hk02][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.bs-hk-hk02 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-bs-hk-hk02/keyring
[bs-hk-hk02][INFO  ] Running command: systemctl enable ceph-mgr@bs-hk-hk02
[bs-hk-hk02][WARNIN] Created symlink from /etc/systemd/system/ceph-mgr.target.wants/ceph-mgr@bs-hk-hk02.service to /usr/lib/systemd/system/ceph-mgr@.service.
[bs-hk-hk02][INFO  ] Running command: systemctl start ceph-mgr@bs-hk-hk02
[bs-hk-hk02][INFO  ] Running command: systemctl enable ceph.target
[root@bs-k8s-ceph ceph]# ceph -s
  cluster:
    id:     11880418-1a9a-4b55-a353-4b141e2199d8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum bs-hk-hk01,bs-hk-hk02,bs-k8s-ceph
    mgr: bs-k8s-ceph(active), standbys: bs-hk-hk01, bs-hk-hk02
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   6.0 GiB used, 108 GiB / 114 GiB avail
    pgs:     
 
[root@bs-k8s-ceph ceph]# vim /etc/ceph/ceph.conf 
[root@bs-k8s-ceph ceph]# ceph mgr module enable dashboard
[root@bs-k8s-ceph ceph]# ceph dashboard create-self-signed-cert
Self-signed certificate created
[root@bs-k8s-ceph ceph]# openssl req -new -nodes -x509   -subj "/O=IT/CN=ceph-mgr-dashboard" -days 3650   -keyout dashboard.key -out dashboard.crt -extensions v3_ca
Generating a 2048 bit RSA private key
.............................................................+++
..........+++
writing new private key to dashboard.key
-----
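Note that ceph dashboard create-self-signed-cert has already generated and stored a certificate, so this manually created pair is only needed to replace it. In Mimic the replacement is stored through config-key; a sketch based on the 13.2.x dashboard docs, worth verifying against your release:

ceph config-key set mgr/dashboard/crt -i dashboard.crt
ceph config-key set mgr/dashboard/key -i dashboard.key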
[root@bs-k8s-ceph ceph]# ls
ceph.bootstrap-mds.keyring  ceph.bootstrap-osd.keyring  ceph.client.admin.keyring  ceph-deploy-ceph.log  dashboard.crt  rbdmap
ceph.bootstrap-mgr.keyring  ceph.bootstrap-rgw.keyring  ceph.conf                  ceph.mon.keyring      dashboard.key
[root@bs-k8s-ceph ceph]# ceph -s
  cluster:
    id:     11880418-1a9a-4b55-a353-4b141e2199d8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum bs-hk-hk01,bs-hk-hk02,bs-k8s-ceph
    mgr: bs-k8s-ceph(active), standbys: bs-hk-hk02, bs-hk-hk01
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   6.0 GiB used, 108 GiB / 114 GiB avail
    pgs:     
 
[root@bs-k8s-ceph ceph]#  ceph config set mgr mgr/dashboard/server_addr 0.0.0.0
[root@bs-k8s-ceph ceph]# ceph config set mgr mgr/dashboard/server_port 7000
[root@bs-k8s-ceph ceph]# ceph -s
  cluster:
    id:     11880418-1a9a-4b55-a353-4b141e2199d8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum bs-hk-hk01,bs-hk-hk02,bs-k8s-ceph
    mgr: bs-k8s-ceph(active), standbys: bs-hk-hk02, bs-hk-hk01
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   6.0 GiB used, 108 GiB / 114 GiB avail
    pgs:     
 
[root@bs-k8s-ceph ceph]# ceph dashboard set-login-credentials admin zisefeizhu
Username and password updated
[root@bs-k8s-ceph ceph]# ceph -s
  cluster:
    id:     11880418-1a9a-4b55-a353-4b141e2199d8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum bs-hk-hk01,bs-hk-hk02,bs-k8s-ceph
    mgr: bs-k8s-ceph(active), standbys: bs-hk-hk02, bs-hk-hk01
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   6.0 GiB used, 108 GiB / 114 GiB avail
    pgs:     
 
[root@bs-k8s-ceph ceph]# ceph mgr services
{
    "dashboard": "https://bs-k8s-ceph:8443/"
}
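Note that ceph mgr services still reports port 8443: the server_addr/server_port values set above only take effect once the dashboard module restarts. A sketch of forcing the rebind to 0.0.0.0:7000:

ceph mgr module disable dashboard
ceph mgr module enable dashboard
ceph mgr services    # should now report port 7000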
[root@bs-k8s-ceph ceph]# ceph-deploy --overwrite-conf config push bs-hk-hk01
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy --overwrite-conf config push bs-hk-hk01
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : True
[ceph_deploy.cli][INFO  ]  subcommand                    : push
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f1907c07638>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  client                        : [bs-hk-hk01]
[ceph_deploy.cli][INFO  ]  func                          : <function config at 0x7f1907e3ac08>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.config][DEBUG ] Pushing config to bs-hk-hk01
[bs-hk-hk01][DEBUG ] connected to host: bs-hk-hk01 
[bs-hk-hk01][DEBUG ] detect platform information from remote host
[bs-hk-hk01][DEBUG ] detect machine type
[bs-hk-hk01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[root@bs-k8s-ceph ceph]# ceph-deploy --overwrite-conf config push bs-hk-hk02
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy --overwrite-conf config push bs-hk-hk02
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : True
[ceph_deploy.cli][INFO  ]  subcommand                    : push
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fb720986638>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  client                        : [bs-hk-hk02]
[ceph_deploy.cli][INFO  ]  func                          : <function config at 0x7fb720bb9c08>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.config][DEBUG ] Pushing config to bs-hk-hk02
[bs-hk-hk02][DEBUG ] connected to host: bs-hk-hk02 
[bs-hk-hk02][DEBUG ] detect platform information from remote host
[bs-hk-hk02][DEBUG ] detect machine type
[bs-hk-hk02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
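config push only copies the file to /etc/ceph/ on the target hosts; any daemons running there will pick up the changes on their next restart.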
[root@bs-k8s-ceph ceph]# ceph -s
  cluster:
    id:     11880418-1a9a-4b55-a353-4b141e2199d8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum bs-hk-hk01,bs-hk-hk02,bs-k8s-ceph
    mgr: bs-k8s-ceph(active), standbys: bs-hk-hk02, bs-hk-hk01
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   6.0 GiB used, 108 GiB / 114 GiB avail
    pgs:     
 
[root@bs-k8s-ceph ceph]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME            STATUS REWEIGHT PRI-AFF 
-1       0.11151 root default                                 
-5       0.03717     host bs-hk-hk01                          
 2   hdd 0.01859         osd.2            up  1.00000 1.00000 
 3   hdd 0.01859         osd.3            up  1.00000 1.00000 
-7       0.03717     host bs-hk-hk02                          
 4   hdd 0.01859         osd.4            up  1.00000 1.00000 
 5   hdd 0.01859         osd.5            up  1.00000 1.00000 
-3       0.03717     host bs-k8s-ceph                         
 0   hdd 0.01859         osd.0            up  1.00000 1.00000 
 1   hdd 0.01859         osd.1            up  1.00000 1.00000 
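In the osd tree output, WEIGHT is the device capacity expressed in TiB, so each 20 GB data disk appears as roughly 0.01859 (about 19 GiB); with two OSDs per host and six in total, this matches the 114 GiB of raw capacity reported by ceph -s.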
[root@bs-k8s-ceph ceph]# systemctl status ceph-
ceph-crash.service                                              ceph-osd@0.service
ceph-mds.target                                                 ceph-osd@1.service
ceph-mgr@bs-k8s-ceph.service                                    ceph-osd.target
ceph-mgr.target                                                 ceph-volume@lvm-0-37d3767e-4e21-448c-91f4-d3a1079bd8bd.service
ceph-mon@bs-k8s-ceph.service                                    ceph-volume@lvm-1-0f718ba7-de6b-4d22-b74d-90657e220121.service
ceph-mon.target                                                 
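(The unit names above are shell tab-completion candidates for systemctl status ceph-, i.e. every ceph systemd unit present on this node: the mon, the mgr, the two OSDs with their ceph-volume activation units, and the targets.)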
[root@bs-k8s-ceph ceph]# systemctl status ceph-osd.target 
● ceph-osd.target - ceph target allowing to start/stop all ceph-osd@.service instances at once
   Loaded: loaded (/usr/lib/systemd/system/ceph-osd.target; enabled; vendor preset: enabled)
   Active: active since Tue 2020-02-11 17:17:42 CST; 12min ago

Feb 11 17:17:42 bs-k8s-ceph systemd[1]: Reached target ceph target allowing to start/stop all ceph-osd@.service instances at once.
[root@bs-k8s-ceph ceph]# ceph -s
  cluster:
    id:     11880418-1a9a-4b55-a353-4b141e2199d8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum bs-hk-hk01,bs-hk-hk02,bs-k8s-ceph
    mgr: bs-k8s-ceph(active), standbys: bs-hk-hk02, bs-hk-hk01
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   6.0 GiB used, 108 GiB / 114 GiB avail
    pgs:     
 
[root@bs-k8s-ceph ceph]# systemctl restart ceph-mgr@bs-k8s-ceph.service
[root@bs-k8s-ceph ceph]# ceph mgr services
{
    "dashboard": "https://0.0.0.0:7000/"
}
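After the restart, the dashboard re-reads its settings and now listens on all interfaces on port 7000; it can be reached in a browser at https://<ip-of-the-active-mgr>:7000/, here the bs-k8s-ceph node.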
[root@bs-k8s-ceph ceph]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME            STATUS REWEIGHT PRI-AFF 
-1       0.11151 root default                                 
-5       0.03717     host bs-hk-hk01                          
 2   hdd 0.01859         osd.2            up  1.00000 1.00000 
 3   hdd 0.01859         osd.3            up  1.00000 1.00000 
-7       0.03717     host bs-hk-hk02                          
 4   hdd 0.01859         osd.4            up  1.00000 1.00000 
 5   hdd 0.01859         osd.5            up  1.00000 1.00000 
-3       0.03717     host bs-k8s-ceph                         
 0   hdd 0.01859         osd.0            up  1.00000 1.00000 
 1   hdd 0.01859         osd.1            up  1.00000 1.00000 
[root@bs-k8s-ceph ceph]# ceph -s
  cluster:
    id:     11880418-1a9a-4b55-a353-4b141e2199d8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum bs-hk-hk01,bs-hk-hk02,bs-k8s-ceph
    mgr: bs-k8s-ceph(active), standbys: bs-hk-hk02, bs-hk-hk01
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   6.0 GiB used, 108 GiB / 114 GiB avail
    pgs:     
 
[root@bs-k8s-ceph ceph]# ceph osd pool create rbd 128   
pool rbd created
[root@bs-k8s-ceph ceph]# ceph osd pool get rbd pg_num
pg_num: 128
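A common rule of thumb for pg_num is (number of OSDs × 100) / replica size, rounded to a power of two; with 6 OSDs and the default size of 3 that gives 200, so 128 is a reasonable choice here. Note that since Luminous a pool should also be tagged with an application before use, or ceph will eventually warn "application not enabled on pool"; for an RBD pool, a sketch (not run in the original log):

ceph osd pool application enable rbd rbd
# or, equivalently, initialize it for RBD use:
rbd pool init rbd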
[root@bs-k8s-ceph ceph]# ceph -s
  cluster:
    id:     11880418-1a9a-4b55-a353-4b141e2199d8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum bs-hk-hk01,bs-hk-hk02,bs-k8s-ceph
    mgr: bs-k8s-ceph(active), standbys: bs-hk-hk02, bs-hk-hk01
    osd: 6 osds: 6 up, 6 in
 
  data:
    pools:   1 pools, 128 pgs
    objects: 0  objects, 0 B
    usage:   6.0 GiB used, 108 GiB / 114 GiB avail
    pgs:     128 active+clean
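With all 128 PGs active+clean the cluster is ready for use. As a quick smoke test (a sketch, assuming the rbd application tag has been set on the pool as noted above), an image can be created, inspected, and removed from any host holding the admin keyring:

rbd create rbd/test --size 1G
rbd ls rbd
rbd info rbd/test
rbd rm rbd/test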

 

Original article: https://www.cnblogs.com/zisefeizhu/p/12318430.html
