Add an OSD node

On the OSD node (ceph-node4)

Create the ceph user

[root@ceph-node4 ~]# useradd ceph
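Give the new user a password so the key copy in the passwordless-login step below can authenticate once (this assumes sshd still allows password logins; pick any password you like):

[root@ceph-node4 ~]# passwd ceph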

Configure sudo

# echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
# chmod 0440 /etc/sudoers.d/ceph
# su - ceph
$ fdisk -l          # as the plain ceph user this prints little or nothing
$ sudo fdisk -l     # through sudo it works, confirming the sudoers entry

Disk /dev/sda: 299.4 GB, 299439751168 bytes, 584843264 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000a9e9f

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048      411647      204800   83  Linux
/dev/sda2          411648    17188863     8388608   82  Linux swap / Solaris
/dev/sda3        17188864   226904063   104857600   83  Linux

Disk /dev/sdb: 299.4 GB, 299439751168 bytes, 584843264 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Note that /dev/sdb has no partition table yet; this is the disk that will back the new OSD.

TTY config

ceph-deploy runs remote commands over SSH without a TTY, so the sudoers requiretty setting must not apply to the ceph user. In visudo, comment out the global Defaults requiretty line (if present) and add an exemption for ceph:

# visudo
# Defaults    requiretty
Defaults:ceph !requiretty
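A quick way to confirm both sudoers changes together (a minimal check, assuming you can already reach the node over SSH; ssh without -t allocates no TTY, so this exercises NOPASSWD and !requiretty at once):

$ ssh ceph-node4 sudo whoami
root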

On the monitor node

Passwordless SSH login

# su - ceph
$ ssh-copy-id ceph-node4
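If ssh-copy-id reports that no identity exists, generate a key pair for the ceph user first with ssh-keygen (accepting the defaults). Afterwards a login should need no password:

$ ssh ceph-node4 hostname
ceph-node4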

Deploy Ceph to the OSD node

$ ceph-deploy install ceph-node4
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.31): /bin/ceph-deploy install ceph-node4
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  testing                       : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x21ba0e0>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  dev_commit                    : None
[ceph_deploy.cli][INFO  ]  install_mds                   : False
[ceph_deploy.cli][INFO  ]  stable                        : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  adjust_repos                  : False
[ceph_deploy.cli][INFO  ]  func                          : <function install at 0x212b230>
[ceph_deploy.cli][INFO  ]  install_all                   : False
[ceph_deploy.cli][INFO  ]  repo                          : False
[ceph_deploy.cli][INFO  ]  host                          : ['ceph-node4']
[ceph_deploy.cli][INFO  ]  install_rgw                   : False
[ceph_deploy.cli][INFO  ]  install_tests                 : False
[ceph_deploy.cli][INFO  ]  repo_url                      : None
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  install_osd                   : False
[ceph_deploy.cli][INFO  ]  version_kind                  : stable
[ceph_deploy.cli][INFO  ]  install_common                : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  dev                           : master
[ceph_deploy.cli][INFO  ]  local_mirror                  : None
[ceph_deploy.cli][INFO  ]  release                       : None
[ceph_deploy.cli][INFO  ]  install_mon                   : False
[ceph_deploy.cli][INFO  ]  gpg_url                       : None
[ceph_deploy.install][DEBUG ] Installing stable version infernalis on cluster ceph hosts ceph-node4
[ceph_deploy.install][DEBUG ] Detecting platform for host ceph-node4 ...
[ceph-node4][DEBUG ] connection detected need for sudo
[ceph-node4][DEBUG ] connected to host: ceph-node4 
[ceph-node4][DEBUG ] detect platform information from remote host
[ceph-node4][DEBUG ] detect machine type
[ceph_deploy.install][INFO  ] Distro info: CentOS Linux 7.3.1611 Core
[ceph-node4][INFO  ] installing Ceph on ceph-node4
[ceph-node4][INFO  ] Running command: sudo yum clean all
[ceph-node4][DEBUG ] Loaded plugins: fastestmirror
[ceph-node4][DEBUG ] Cleaning repos: base centos-ceph-hammer centos-openstack-mitaka centos-qemu-ev
[ceph-node4][DEBUG ]               : extras updates
[ceph-node4][DEBUG ] Cleaning up everything
[ceph-node4][DEBUG ] Cleaning up list of fastest mirrors
[ceph-node4][INFO  ] Running command: sudo yum -y install ceph ceph-radosgw
[ceph-node4][DEBUG ] Loaded plugins: fastestmirror
[ceph-node4][DEBUG ] Determining fastest mirrors
[ceph-node4][DEBUG ]  * base: mirrors.yun-idc.com
[ceph-node4][DEBUG ]  * extras: mirror.bit.edu.cn
[ceph-node4][DEBUG ]  * updates: mirror.bit.edu.cn
[ceph-node4][DEBUG ] Package 1:ceph-0.94.9-0.el7.x86_64 already installed and latest version
[ceph-node4][DEBUG ] Package 1:ceph-radosgw-0.94.9-0.el7.x86_64 already installed and latest version
[ceph-node4][DEBUG ] Nothing to do
[ceph-node4][INFO  ] Running command: sudo ceph --version
[ceph-node4][DEBUG ] ceph version 0.94.9 (fe6d859066244b97b24f09d46552afc2071e6f90)

Zap the disk

$ ceph-deploy disk zap ceph-node4:sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.31): /bin/ceph-deploy disk zap ceph-node4:sdb
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : zap
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x155e200>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function disk at 0x1553c80>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  disk                          : [('ceph-node4', '/dev/sdb', None)]
[ceph_deploy.osd][DEBUG ] zapping /dev/sdb on ceph-node4
[ceph-node4][DEBUG ] connection detected need for sudo
[ceph-node4][DEBUG ] connected to host: ceph-node4 
[ceph-node4][DEBUG ] detect platform information from remote host
[ceph-node4][DEBUG ] detect machine type
[ceph-node4][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.3.1611 Core
[ceph-node4][DEBUG ] zeroing last few blocks of device
[ceph-node4][DEBUG ] find the location of an executable
[ceph-node4][INFO  ] Running command: sudo /usr/sbin/ceph-disk zap /dev/sdb
[ceph-node4][DEBUG ] Creating new GPT entries.
[ceph-node4][DEBUG ] GPT data structures destroyed! You may now partition the disk using fdisk or
[ceph-node4][DEBUG ] other utilities.
[ceph-node4][DEBUG ] Creating new GPT entries.
[ceph-node4][DEBUG ] The operation has completed successfully.
[ceph-node4][WARNIN] partx: specified range <1:0> does not make sense
[ceph_deploy.osd][INFO  ] calling partx on zapped device /dev/sdb
[ceph_deploy.osd][INFO  ] re-reading known partitions will display errors
[ceph-node4][INFO  ] Running command: sudo partx -a /dev/sdb
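The partx messages are expected here: the kernel is being asked to re-read a table that was just rewritten. If zap ever fails because of stale GPT/MBR metadata, a manual wipe is a workable fallback (a hedged sketch, run on the OSD node; this destroys everything on /dev/sdb):

$ sudo sgdisk --zap-all /dev/sdb
$ sudo partprobe /dev/sdb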


Prepare the OSD

If this step fails, partition and format the disk manually; a fallback sketch follows after the log below.

$ ceph-deploy osd prepare ceph-node4:sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.31): /bin/ceph-deploy osd prepare ceph-node4:sdb
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  disk                          : [('ceph-node4', '/dev/sdb', None)]
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : prepare
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0xf66368>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0xf59c08>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.osd][DEBUG ] Preparing cluster ceph disks ceph-node4:/dev/sdb:
[ceph-node4][DEBUG ] connection detected need for sudo
[ceph-node4][DEBUG ] connected to host: ceph-node4 
[ceph-node4][DEBUG ] detect platform information from remote host
[ceph-node4][DEBUG ] detect machine type
[ceph-node4][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.3.1611 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to ceph-node4
[ceph-node4][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph-node4][WARNIN] osd keyring does not exist yet, creating one
[ceph-node4][DEBUG ] create a keyring file
[ceph_deploy.osd][DEBUG ] Preparing host ceph-node4 disk /dev/sdb journal None activate False
[ceph-node4][INFO  ] Running command: sudo ceph-disk -v prepare --cluster ceph --fs-type xfs -- /dev/sdb
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=osd_journal_size
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_cryptsetup_parameters
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_key_size
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_type
[ceph-node4][WARNIN] INFO:ceph-disk:Will colocate journal with data on /dev/sdb
[ceph-node4][WARNIN] DEBUG:ceph-disk:Creating journal partition num 2 size 5120 on /dev/sdb
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /sbin/sgdisk --new=2:0:5120M --change-name=2:ceph journal --partition-guid=2:d0fd988b-ac61-4fc3-8da6-4b1eb6e4f79d --typecode=2:45b0969e-9b03-4f30-b4c6-b4b80ceff106 --mbrtogpt -- /dev/sdb
[ceph-node4][DEBUG ] The operation has completed successfully.
[ceph-node4][WARNIN] INFO:ceph-disk:calling partx on prepared device /dev/sdb
[ceph-node4][WARNIN] INFO:ceph-disk:re-reading known partitions will display errors
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /sbin/partx -a /dev/sdb
[ceph-node4][WARNIN] partx: /dev/sdb: error adding partition 2
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/udevadm settle
[ceph-node4][WARNIN] DEBUG:ceph-disk:Journal is GPT partition /dev/disk/by-partuuid/d0fd988b-ac61-4fc3-8da6-4b1eb6e4f79d
[ceph-node4][WARNIN] DEBUG:ceph-disk:Journal is GPT partition /dev/disk/by-partuuid/d0fd988b-ac61-4fc3-8da6-4b1eb6e4f79d
[ceph-node4][WARNIN] DEBUG:ceph-disk:Creating osd partition on /dev/sdb
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:bf022224-3703-452f-9187-cbbcfb6b0454 --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be -- /dev/sdb
[ceph-node4][DEBUG ] The operation has completed successfully.
[ceph-node4][WARNIN] INFO:ceph-disk:calling partx on created device /dev/sdb
[ceph-node4][WARNIN] INFO:ceph-disk:re-reading known partitions will display errors
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /sbin/partx -a /dev/sdb
[ceph-node4][WARNIN] partx: /dev/sdb: error adding partitions 1-2
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/udevadm settle
[ceph-node4][WARNIN] DEBUG:ceph-disk:Creating xfs fs on /dev/sdb1
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /sbin/mkfs -t xfs -f -d agcount=8 -i size=2048 -f -- /dev/sdb1
[ceph-node4][DEBUG ] meta-data=/dev/sdb1              isize=2048   agcount=8, agsize=8974304 blks
[ceph-node4][DEBUG ]          =                       sectsz=512   attr=2, projid32bit=1
[ceph-node4][DEBUG ]          =                       crc=1        finobt=0, sparse=0
[ceph-node4][DEBUG ] data     =                       bsize=4096   blocks=71794427, imaxpct=25
[ceph-node4][DEBUG ]          =                       sunit=0      swidth=0 blks
[ceph-node4][DEBUG ] naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
[ceph-node4][DEBUG ] log      =internal log           bsize=4096   blocks=35055, version=2
[ceph-node4][DEBUG ]          =                       sectsz=512   sunit=0 blks, lazy-count=1
[ceph-node4][DEBUG ] realtime =none                   extsz=4096   blocks=0, rtextents=0
[ceph-node4][WARNIN] DEBUG:ceph-disk:Mounting /dev/sdb1 on /var/lib/ceph/tmp/mnt.gcwRRg with options rw,nodev,noatime,nobarrier,noexec,inode64
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/mount -t xfs -o rw,nodev,noatime,nobarrier,noexec,inode64 -- /dev/sdb1 /var/lib/ceph/tmp/mnt.gcwRRg
[ceph-node4][WARNIN] DEBUG:ceph-disk:Preparing osd data dir /var/lib/ceph/tmp/mnt.gcwRRg
[ceph-node4][WARNIN] DEBUG:ceph-disk:Creating symlink /var/lib/ceph/tmp/mnt.gcwRRg/journal -> /dev/disk/by-partuuid/d0fd988b-ac61-4fc3-8da6-4b1eb6e4f79d
[ceph-node4][WARNIN] DEBUG:ceph-disk:Unmounting /var/lib/ceph/tmp/mnt.gcwRRg
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.gcwRRg
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /sbin/sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/sdb
[ceph-node4][DEBUG ] Warning: The kernel is still using the old partition table.
[ceph-node4][DEBUG ] The new table will be used at the next reboot.
[ceph-node4][DEBUG ] The operation has completed successfully.
[ceph-node4][WARNIN] INFO:ceph-disk:calling partx on prepared device /dev/sdb
[ceph-node4][WARNIN] INFO:ceph-disk:re-reading known partitions will display errors
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /sbin/partx -a /dev/sdb
[ceph-node4][WARNIN] partx: /dev/sdb: error adding partitions 1-2
[ceph-node4][INFO  ] checking OSD status...
[ceph-node4][INFO  ] Running command: sudo ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host ceph-node4 is now ready for osd use.
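If osd prepare fails against the raw disk, the fallback mentioned above is to partition and format it yourself and hand the ready-made partition to ceph-deploy (a sketch under the assumption that /dev/sdb is empty; adjust the device name for your host):

$ sudo parted -s /dev/sdb mklabel gpt
$ sudo parted -s /dev/sdb mkpart primary xfs 0% 100%
$ sudo mkfs.xfs -f /dev/sdb1
$ ceph-deploy osd prepare ceph-node4:/dev/sdb1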

Activate the OSD

# The disk must already be partitioned; activate targets the data partition (/dev/sdb1) created by the prepare step
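The prepare step warned that the kernel was still using the old partition table, so before activating it may help to confirm /dev/sdb1 is actually visible (a hedged check; partprobe asks the kernel to re-read the table):

$ sudo partprobe /dev/sdb
$ lsblk /dev/sdb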

[ceph@ceph-node1 ceph-cluster]$ ceph-deploy osd activate ceph-node4:sdb1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.31): /bin/ceph-deploy osd activate ceph-node4:sdb1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : activate
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x10bd368>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x10b0c08>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  disk                          : [('ceph-node4', '/dev/sdb1', None)]
[ceph_deploy.osd][DEBUG ] Activating cluster ceph disks ceph-node4:/dev/sdb1:
[ceph-node4][DEBUG ] connection detected need for sudo
[ceph-node4][DEBUG ] connected to host: ceph-node4 
[ceph-node4][DEBUG ] detect platform information from remote host
[ceph-node4][DEBUG ] detect machine type
[ceph-node4][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.3.1611 Core
[ceph_deploy.osd][DEBUG ] activating host ceph-node4 disk /dev/sdb1
[ceph_deploy.osd][DEBUG ] will use init type: sysvinit
[ceph-node4][INFO  ] Running command: sudo ceph-disk -v activate --mark-init sysvinit --mount /dev/sdb1
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /sbin/blkid -p -s TYPE -ovalue -- /dev/sdb1
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
[ceph-node4][WARNIN] DEBUG:ceph-disk:Mounting /dev/sdb1 on /var/lib/ceph/tmp/mnt.EFaq9Z with options rw,nodev,noatime,nobarrier,noexec,inode64
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/mount -t xfs -o rw,nodev,noatime,nobarrier,noexec,inode64 -- /dev/sdb1 /var/lib/ceph/tmp/mnt.EFaq9Z
[ceph-node4][WARNIN] DEBUG:ceph-disk:Cluster uuid is 632c3580-e5fd-42d6-9493-2ec31e357778
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[ceph-node4][WARNIN] DEBUG:ceph-disk:Cluster name is ceph
[ceph-node4][WARNIN] DEBUG:ceph-disk:OSD uuid is 20434581-5e94-434c-b07b-21f99f38f1de
[ceph-node4][WARNIN] DEBUG:ceph-disk:Allocating OSD id...
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd create --concise 20434581-5e94-434c-b07b-21f99f38f1de
[ceph-node4][WARNIN] DEBUG:ceph-disk:OSD id is 4
[ceph-node4][WARNIN] DEBUG:ceph-disk:Initializing OSD...
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/tmp/mnt.EFaq9Z/activate.monmap
[ceph-node4][WARNIN] got monmap epoch 1
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster ceph --mkfs --mkkey -i 4 --monmap /var/lib/ceph/tmp/mnt.EFaq9Z/activate.monmap --osd-data /var/lib/ceph/tmp/mnt.EFaq9Z --osd-journal /var/lib/ceph/tmp/mnt.EFaq9Z/journal --osd-uuid 20434581-5e94-434c-b07b-21f99f38f1de --keyring /var/lib/ceph/tmp/mnt.EFaq9Z/keyring
[ceph-node4][WARNIN] 2017-03-24 13:28:51.689590 7fe79e493880 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
[ceph-node4][WARNIN] 2017-03-24 13:28:51.786210 7fe79e493880 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
[ceph-node4][WARNIN] 2017-03-24 13:28:51.793850 7fe79e493880 -1 filestore(/var/lib/ceph/tmp/mnt.EFaq9Z) could not find -1/23c2fcde/osd_superblock/0 in index: (2) No such file or directory
[ceph-node4][WARNIN] 2017-03-24 13:28:51.922633 7fe79e493880 -1 created object store /var/lib/ceph/tmp/mnt.EFaq9Z journal /var/lib/ceph/tmp/mnt.EFaq9Z/journal for osd.4 fsid 791c2ef6-bc56-43b0-b2c7-0cd863621040
[ceph-node4][WARNIN] 2017-03-24 13:28:51.922708 7fe79e493880 -1 auth: error reading file: /var/lib/ceph/tmp/mnt.EFaq9Z/keyring: can't open /var/lib/ceph/tmp/mnt.EFaq9Z/keyring: (2) No such file or directory
[ceph-node4][WARNIN] 2017-03-24 13:28:51.922931 7fe79e493880 -1 created new key in keyring /var/lib/ceph/tmp/mnt.EFaq9Z/keyring
[ceph-node4][WARNIN] DEBUG:ceph-disk:Marking with init system sysvinit
[ceph-node4][WARNIN] DEBUG:ceph-disk:Authorizing OSD key...
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring auth add osd.4 -i /var/lib/ceph/tmp/mnt.EFaq9Z/keyring osd allow * mon allow profile osd
[ceph-node4][WARNIN] added key for osd.4
[ceph-node4][WARNIN] DEBUG:ceph-disk:ceph osd.4 data dir is ready at /var/lib/ceph/tmp/mnt.EFaq9Z
[ceph-node4][WARNIN] DEBUG:ceph-disk:Moving mount to final location...
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /bin/mount -o rw,nodev,noatime,nobarrier,noexec,inode64 -- /dev/sdb1 /var/lib/ceph/osd/ceph-4
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /bin/umount -l -- /var/lib/ceph/tmp/mnt.EFaq9Z
[ceph-node4][WARNIN] DEBUG:ceph-disk:Starting ceph osd.4...
[ceph-node4][WARNIN] INFO:ceph-disk:Running command: /usr/sbin/service ceph --cluster ceph start osd.4
[ceph-node4][DEBUG ] === osd.4 === 
[ceph-node4][DEBUG ] Starting Ceph osd.4 on ceph-node4...
[ceph-node4][WARNIN] Running as unit ceph-osd.4.1490333332.568382221.service.
[ceph-node4][INFO  ] checking OSD status...
[ceph-node4][INFO  ] Running command: sudo ceph --cluster=ceph osd stat --format=json
[ceph-node4][INFO  ] Running command: sudo systemctl enable ceph
[ceph-node4][WARNIN] ceph.service is not a native service, redirecting to /sbin/chkconfig.
[ceph-node4][WARNIN] Executing /sbin/chkconfig ceph on
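osd.4 is now running, but on this hammer-era cluster it may not yet sit in the CRUSH hierarchy, which is why the next section adds it by hand. A quick check from the monitor node (assuming the admin keyring is available):

$ ceph osd tree
# if osd.4 shows weight 0 or sits outside any host bucket, add it as shown below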


Add the OSD to the CRUSH map

Docs: http://docs.ceph.org.cn/rados/operations/add-or-rm-osds/

Command format: ceph osd crush add {osd.name|id} {weight} host={osd-hostname}. By convention the CRUSH weight tracks disk capacity in TB, which is why a ~300 GB disk gets roughly 0.3 below.


ceph osd crush add osd.2 0.5 host=ceph-node2

ceph osd crush add osd.4 0.3 host=ceph-node4


Set the OSD's CRUSH root

Docs: http://docs.ceph.org.cn/rados/operations/crush-map/#addosd

$ ceph osd crush set osd.4 0.3 host=ceph-node4 root=default 
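Finally, verify the placement and let the cluster rebalance (a minimal check; exact output depends on your cluster):

$ ceph osd tree
$ ceph -s
# done when status reports HEALTH_OK and all PGs are active+clean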