
Linux/Open Source

Ceph installation summary

phpdoumi 2016. 7. 6. 10:38

Starting point: four nodes (admin, node1, node2, node3), each with a base Ubuntu 16.04 install plus openssh-server.
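Each machine also needs its hostname set to the name used below; a minimal sketch of that preparation (assumed, not shown in the original log), run once per node with its own name:

sudo hostnamectl set-hostname node1    # admin, node1, node2, node3 respectively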


[[[ All nodes ]]]

root@admin:~# apt-get install chrony
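chrony keeps the node clocks in sync (clock skew between monitors leads to health warnings). A quick verification that the service is running and tracking time sources, not part of the original log:

systemctl enable chrony
systemctl status chrony
chronyc sources     # lists the NTP sources chrony is currently using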


root@admin:~# apt-get install python-ceph

Reading package lists... Done

Building dependency tree

Reading state information... Done

The following additional packages will be installed:

  libboost-iostreams1.58.0 libboost-random1.58.0 libboost-system1.58.0

  libboost-thread1.58.0 libcephfs1 libnspr4 libnss3 libnss3-nssdb

  libpython-stdlib libpython2.7-minimal libpython2.7-stdlib librados2 librbd1

  python python-cephfs python-minimal python-rados python-rbd python2.7

  python2.7-minimal

Suggested packages:

  python-doc python-tk python2.7-doc binutils binfmt-support

The following NEW packages will be installed:

  libboost-iostreams1.58.0 libboost-random1.58.0 libboost-system1.58.0

  libboost-thread1.58.0 libcephfs1 libnspr4 libnss3 libnss3-nssdb

  libpython-stdlib libpython2.7-minimal libpython2.7-stdlib librados2 librbd1

  python python-ceph python-cephfs python-minimal python-rados python-rbd

  python2.7 python2.7-minimal

0 upgraded, 21 newly installed, 0 to remove and 0 not upgraded.

Need to get 11.9 MB of archives.

After this operation, 45.2 MB of additional disk space will be used.

Do you want to continue? [Y/n]


root@admin:~# echo -e 'Defaults:ubuntu !requiretty\nubuntu ALL = (root) NOPASSWD:ALL' | tee /etc/sudoers.d/ceph

Defaults:ubuntu !requiretty
ubuntu ALL = (root) NOPASSWD:ALL

root@admin:~# chmod 440 /etc/sudoers.d/ceph
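Since a broken sudoers drop-in can disable sudo entirely, it is worth validating the new file (an extra check, not in the original log):

visudo -cf /etc/sudoers.d/ceph    # parse-check the drop-in file
sudo -l -U ubuntu                 # confirm the NOPASSWD rule is in effect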

[[[ Admin Node ]]]
ubuntu@admin:~$ ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/home/ubuntu/.ssh/id_rsa):
Created directory '/home/ubuntu/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/ubuntu/.ssh/id_rsa.
Your public key has been saved in /home/ubuntu/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:9/2tIdpvbgbsDniw2QU3i6m8+t+dqs+E/pqw8aoH+zQ ubuntu@admin
The key's randomart image is:
+---[RSA 2048]----+
|                 |
|                 |
|          . o    |
|           = o   |
|        S +.o    |
|      .. O +o.   |
|       oE =.+.o  |
|      ...X O.o++.|
|      o**oO*OB*.o|
+----[SHA256]-----+

ubuntu@admin:~$ cat .ssh/config

Host admin

        Hostname admin

        User ubuntu

Host node1

        Hostname node1

        User ubuntu

Host node2

        Hostname node2

        User ubuntu

Host node3

        Hostname node3

        User ubuntu
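ssh refuses to use a ~/.ssh/config that other users can write to, so tightening its permissions is a small precaution (assumed, not shown in the log):

chmod 600 ~/.ssh/config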


ubuntu@admin:~$ cat /etc/hosts
127.0.0.1       localhost
172.24.4.227    admin
172.24.4.237    node1
172.24.4.228    node2
172.24.4.229    node3

# The following lines are desirable for IPv6 capable hosts
::1     localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters


ubuntu@admin:~$ ssh-copy-id node1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/home/ubuntu/.ssh/id_rsa.pub"
The authenticity of host 'node1 (172.24.4.237)' can't be established.
ECDSA key fingerprint is SHA256:cru0V5GhyWfldZkD3A7uVsn7I+/gPQc6E7T+Lqkv3i8.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
ubuntu@node1's password:

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node1'"
and check to make sure that only the key(s) you wanted were added.

ubuntu@admin:~$ ssh-copy-id node2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/home/ubuntu/.ssh/id_rsa.pub"
The authenticity of host 'node2 (172.24.4.228)' can't be established.
ECDSA key fingerprint is SHA256:STMs/34PlFl9sMlMj43rDn0YGn5HPwrkKhmkUD6Bp/I.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
ubuntu@node2's password:

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node2'"
and check to make sure that only the key(s) you wanted were added.

ubuntu@admin:~$ ssh-copy-id node3
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/home/ubuntu/.ssh/id_rsa.pub"
The authenticity of host 'node3 (172.24.4.229)' can't be established.
ECDSA key fingerprint is SHA256:OT2trF3gHJ+6Ia6YJ92zbkOp0qYB+Xr/XR8qKdK1RhQ.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
ubuntu@node3's password:

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node3'"
and check to make sure that only the key(s) you wanted were added.
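Before cloning, a one-line loop confirms passwordless SSH really works to every node (a sketch, not from the original log):

for h in node1 node2 node3; do ssh $h hostname; done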

Take a clone/snapshot of the VMs at this point, then continue from here.

ubuntu@admin:~$ sudo apt-get install ceph-deploy ceph-common ceph-mds

Reading package lists... Done

Building dependency tree

Reading state information... Done

The following additional packages will be installed:

  binutils ceph ceph-fs-common ceph-fuse cryptsetup-bin gdisk

  javascript-common libaio1 libbabeltrace-ctf1 libbabeltrace1

  libboost-program-options1.58.0 libboost-regex1.58.0 libfcgi0ldbl

  libgoogle-perftools4 libjs-jquery libleveldb1v5 libopts25 libperl5.22

  libradosstriper1 libreadline5 librgw2 libsnappy1v5 libtcmalloc-minimal4

  libunwind8 ntp perl perl-modules-5.22 python-blinker python-cffi-backend

  python-chardet python-cryptography python-enum34 python-flask python-idna

  python-ipaddress python-itsdangerous python-jinja2 python-markupsafe

  python-ndg-httpsclient python-openssl python-pkg-resources python-pyasn1

  python-pyinotify python-requests python-six python-urllib3 python-werkzeug

  rename xfsprogs

Suggested packages:

  binutils-doc apache2 | lighttpd | httpd ntp-doc perl-doc

  libterm-readline-gnu-perl | libterm-readline-perl-perl make

  python-blinker-doc python-cryptography-doc python-cryptography-vectors

  python-enum34-doc python-flask-doc python-jinja2-doc python-openssl-doc

  python-openssl-dbg python-setuptools doc-base python-pyinotify-doc

  python-ntlm ipython python-genshi python-lxml python-greenlet python-redis

  python-pylibmc | python-memcache python-werkzeug-doc xfsdump acl attr quota

The following NEW packages will be installed:

  binutils ceph ceph-common ceph-deploy ceph-fs-common ceph-fuse ceph-mds

  cryptsetup-bin gdisk javascript-common libaio1 libbabeltrace-ctf1

  libbabeltrace1 libboost-program-options1.58.0 libboost-regex1.58.0

  libfcgi0ldbl libgoogle-perftools4 libjs-jquery libleveldb1v5 libopts25

  libperl5.22 libradosstriper1 libreadline5 librgw2 libsnappy1v5

  libtcmalloc-minimal4 libunwind8 ntp perl perl-modules-5.22 python-blinker

  python-cffi-backend python-chardet python-cryptography python-enum34

  python-flask python-idna python-ipaddress python-itsdangerous python-jinja2

  python-markupsafe python-ndg-httpsclient python-openssl python-pkg-resources

  python-pyasn1 python-pyinotify python-requests python-six python-urllib3

  python-werkzeug rename xfsprogs

0 upgraded, 52 newly installed, 0 to remove and 0 not upgraded.

Need to get 47.6 MB of archives.

After this operation, 227 MB of additional disk space will be used.

Do you want to continue? [Y/n]


ubuntu@admin:~$ mkdir ceph

ubuntu@admin:~$ cd ceph/

ubuntu@admin:~/ceph$ ceph-deploy new node1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ubuntu/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.32): /usr/bin/ceph-deploy new node1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fe193793b00>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  ssh_copykey                   : True
[ceph_deploy.cli][INFO  ]  mon                           : ['node1']
[ceph_deploy.cli][INFO  ]  func                          : <function new at 0x7fe193e1ff50>
[ceph_deploy.cli][INFO  ]  public_network                : None
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  cluster_network               : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  fsid                          : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO  ] making sure passwordless SSH succeeds
[node1][DEBUG ] connected to host: admin
[node1][INFO  ] Running command: ssh -CT -o BatchMode=yes node1
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] find the location of an executable
[node1][INFO  ] Running command: sudo /bin/ip link show
[node1][INFO  ] Running command: sudo /bin/ip addr show
[node1][DEBUG ] IP addresses found: ['172.24.4.237']
[ceph_deploy.new][DEBUG ] Resolving host node1
[ceph_deploy.new][DEBUG ] Monitor node1 at 172.24.4.237
[ceph_deploy.new][DEBUG ] Monitor initial members are ['node1']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['172.24.4.237']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...

ubuntu@admin:~/ceph$ vi ceph.conf
osd pool default size = 2 ### add this line.
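For reference, the resulting ceph.conf should look roughly like the following; the fsid and monitor address match what the logs further below report, the auth lines are what ceph-deploy new normally writes, and the last line is the one added here:

[global]
fsid = d7c1dfe7-362d-482a-93df-81628acadffa
mon_initial_members = node1
mon_host = 172.24.4.237
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd pool default size = 2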

ubuntu@admin:~/ceph$ ceph-deploy install admin node1 node2 node3

ubuntu@admin:~/ceph$ ceph-deploy mon create-initial
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ubuntu/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.32): /usr/bin/ceph-deploy mon create-initial
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create-initial
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f0231282f38>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mon at 0x7f0231261e60>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  keyrings                      : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.mon][DEBUG ] Deploying mon, cluster ceph hosts node1
[ceph_deploy.mon][DEBUG ] detecting platform for host node1 ...
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] find the location of an executable
[ceph_deploy.mon][INFO  ] distro info: Ubuntu 16.04 xenial
[node1][DEBUG ] determining if provided host has same hostname in remote
[node1][DEBUG ] get remote short hostname
[node1][DEBUG ] deploying mon to node1
[node1][DEBUG ] get remote short hostname
[node1][DEBUG ] remote hostname: node1
[node1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node1][DEBUG ] create the mon path if it does not exist
[node1][DEBUG ] checking for done path: /var/lib/ceph/mon/ceph-node1/done
[node1][DEBUG ] done path does not exist: /var/lib/ceph/mon/ceph-node1/done
[node1][INFO  ] creating keyring file: /var/lib/ceph/tmp/ceph-node1.mon.keyring
[node1][DEBUG ] create the monitor keyring file
[node1][INFO  ] Running command: sudo ceph-mon --cluster ceph --mkfs -i node1 --keyring /var/lib/ceph/tmp/ceph-node1.mon.keyring --setuser 64045 --setgroup 64045
[node1][DEBUG ] ceph-mon: mon.noname-a 172.24.4.237:6789/0 is local, renaming to mon.node1
[node1][DEBUG ] ceph-mon: set fsid to d7c1dfe7-362d-482a-93df-81628acadffa
[node1][DEBUG ] ceph-mon: created monfs at /var/lib/ceph/mon/ceph-node1 for mon.node1
[node1][INFO  ] unlinking keyring file /var/lib/ceph/tmp/ceph-node1.mon.keyring
[node1][DEBUG ] create a done file to avoid re-doing the mon deployment
[node1][DEBUG ] create the init path if it does not exist
[node1][INFO  ] Running command: sudo systemctl enable ceph.target
[node1][WARNIN] Created symlink from /etc/systemd/system/multi-user.target.wants/ceph.target to /lib/systemd/system/ceph.target.
[node1][INFO  ] Running command: sudo systemctl enable ceph-mon@node1
[node1][WARNIN] Created symlink from /etc/systemd/system/ceph-mon.target.wants/ceph-mon@node1.service to /lib/systemd/system/ceph-mon@.service.
[node1][INFO  ] Running command: sudo systemctl start ceph-mon@node1
[node1][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status
[node1][DEBUG ] ********************************************************************************
[node1][DEBUG ] status for monitor: mon.node1
[node1][DEBUG ] {
[node1][DEBUG ]   "election_epoch": 3,
[node1][DEBUG ]   "extra_probe_peers": [],
[node1][DEBUG ]   "monmap": {
[node1][DEBUG ]     "created": "2016-07-05 17:37:32.325134",
[node1][DEBUG ]     "epoch": 1,
[node1][DEBUG ]     "fsid": "d7c1dfe7-362d-482a-93df-81628acadffa",
[node1][DEBUG ]     "modified": "2016-07-05 17:37:32.325134",
[node1][DEBUG ]     "mons": [
[node1][DEBUG ]       {
[node1][DEBUG ]         "addr": "172.24.4.237:6789/0",
[node1][DEBUG ]         "name": "node1",
[node1][DEBUG ]         "rank": 0
[node1][DEBUG ]       }
[node1][DEBUG ]     ]
[node1][DEBUG ]   },
[node1][DEBUG ]   "name": "node1",
[node1][DEBUG ]   "outside_quorum": [],
[node1][DEBUG ]   "quorum": [
[node1][DEBUG ]     0
[node1][DEBUG ]   ],
[node1][DEBUG ]   "rank": 0,
[node1][DEBUG ]   "state": "leader",
[node1][DEBUG ]   "sync_provider": []
[node1][DEBUG ] }
[node1][DEBUG ] ********************************************************************************
[node1][INFO  ] monitor: mon.node1 is running
[node1][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status
[ceph_deploy.mon][INFO  ] processing monitor mon.node1
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] find the location of an executable
[node1][INFO  ] Running command: sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok mon_status
[ceph_deploy.mon][INFO  ] mon.node1 monitor has reached quorum!
[ceph_deploy.mon][INFO  ] all initial monitors are running and have formed quorum
[ceph_deploy.mon][INFO  ] Running gatherkeys...
[ceph_deploy.gatherkeys][DEBUG ] Checking node1 for /etc/ceph/ceph.client.admin.keyring
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.client.admin.keyring key from node1.
[ceph_deploy.gatherkeys][DEBUG ] Have ceph.mon.keyring
[ceph_deploy.gatherkeys][DEBUG ] Checking node1 for /var/lib/ceph/bootstrap-osd/ceph.keyring
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.bootstrap-osd.keyring key from node1.
[ceph_deploy.gatherkeys][DEBUG ] Checking node1 for /var/lib/ceph/bootstrap-mds/ceph.keyring
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.bootstrap-mds.keyring key from node1.
[ceph_deploy.gatherkeys][DEBUG ] Checking node1 for /var/lib/ceph/bootstrap-rgw/ceph.keyring
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] fetch remote file
[ceph_deploy.gatherkeys][DEBUG ] Got ceph.bootstrap-rgw.keyring key from node1.

[[[ node1, node2, node3 ]]]
root@node1:~# fdisk /dev/sda
n ## press Enter at every prompt (one primary partition spanning the whole disk), then w to write the table
root@node1:~# mkfs.xfs /dev/sda1
meta-data=/dev/sda1              isize=512    agcount=4, agsize=1966016 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=0
data     =                       bsize=4096   blocks=7864064, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=3839, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
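The interactive fdisk step can also be done non-interactively, which is convenient when preparing several nodes; a sketch using parted (note this writes a GPT label rather than the MBR table fdisk creates by default):

parted -s /dev/sda mklabel gpt
parted -s /dev/sda mkpart primary xfs 0% 100%
mkfs.xfs /dev/sda1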

root@node1:~# cat /etc/fstab
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# <file system> <mount point>   <type>  <options>       <dump>  <pass>
# / was on /dev/vda5 during installation
UUID=9fc209a4-36b3-41cc-bfd9-4c6895e45906 /               ext4    errors=remount-ro 0       1
# /boot was on /dev/vda1 during installation
UUID=31435047-6f9a-4859-98d6-87fbe2891c31 /boot           ext4    defaults        0       2
/dev/sda1       /storage1       xfs     defaults 0 2
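As the comments in the file suggest, referencing the partition by UUID instead of /dev/sda1 makes the entry robust against device renaming; the UUID comes from blkid (placeholder shown, use the real value):

blkid /dev/sda1
# UUID=<uuid-from-blkid>   /storage1   xfs   defaults   0   2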

root@node1:~# mkdir /storage1

root@node1:~# mount -a

root@node1:~# df -h
Filesystem      Size  Used Avail Use% Mounted on
udev            984M     0  984M   0% /dev
tmpfs           201M  3.2M  197M   2% /run
/dev/vda5       9.3G  1.4G  7.5G  16% /
tmpfs          1001M     0 1001M   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs          1001M     0 1001M   0% /sys/fs/cgroup
/dev/vda1       464M   52M  385M  12% /boot
tmpfs           201M     0  201M   0% /run/user/1000
/dev/sda1        30G   33M   30G   1% /storage1

root@node1:~# chown -R ceph:ceph /storage1
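The same partition, mkfs.xfs, fstab, mount, and chown steps are repeated on node2 and node3, with mount points /storage2 and /storage3 so they match the ceph-deploy osd prepare command below; for example on node2:

# after partitioning /dev/sda as on node1:
mkfs.xfs /dev/sda1
mkdir /storage2
echo '/dev/sda1  /storage2  xfs  defaults 0 2' >> /etc/fstab
mount -a
chown -R ceph:ceph /storage2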

[[[ Admin Node ]]]
ubuntu@admin:~/ceph$ ceph-deploy osd prepare node1:/storage1 node2:/storage2 node3:/storage3

ubuntu@admin:~/ceph$ ceph-deploy osd activate node1:/storage1 node2:/storage2 node3:/storage3

ubuntu@admin:~/ceph$ ceph-deploy admin admin node1 node2 node3
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ubuntu/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.32): /usr/bin/ceph-deploy admin admin node1 node2 node3
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f03ea2e6fc8>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  client                        : ['admin', 'node1', 'node2', 'node3']
[ceph_deploy.cli][INFO  ]  func                          : <function admin at 0x7f03eab91aa0>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to admin
[admin][DEBUG ] connection detected need for sudo
[admin][DEBUG ] connected to host: admin
[admin][DEBUG ] detect platform information from remote host
[admin][DEBUG ] detect machine type
[admin][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to node1
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[node1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to node2
[node2][DEBUG ] connection detected need for sudo
[node2][DEBUG ] connected to host: node2
[node2][DEBUG ] detect platform information from remote host
[node2][DEBUG ] detect machine type
[node2][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.admin][DEBUG ] Pushing admin keys and conf to node3
[node3][DEBUG ] connection detected need for sudo
[node3][DEBUG ] connected to host: node3
[node3][DEBUG ] detect platform information from remote host
[node3][DEBUG ] detect machine type
[node3][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
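To run the ceph CLI as the ubuntu user without sudo, the upstream quick start additionally makes the pushed admin keyring readable on each node; here sudo is used instead, so this step is optional:

sudo chmod +r /etc/ceph/ceph.client.admin.keyring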

ubuntu@admin:~/ceph$ sudo ceph health
HEALTH_OK

root@admin:~# ceph osd tree
ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 0.08789 root default
-2 0.02930     host node1
 0 0.02930         osd.0       up  1.00000          1.00000
-3 0.02930     host node2
 1 0.02930         osd.1       up  1.00000          1.00000
-4 0.02930     host node3
 2 0.02930         osd.2       up  1.00000          1.00000

root@admin:~# ceph -s
    cluster d7c1dfe7-362d-482a-93df-81628acadffa
     health HEALTH_OK
     monmap e1: 1 mons at {node1=172.24.4.237:6789/0}
            election epoch 3, quorum 0 node1
     osdmap e16: 3 osds: 3 up, 3 in
            flags sortbitwise
      pgmap v29: 64 pgs, 1 pools, 0 bytes data, 0 objects
            15459 MB used, 76652 MB / 92112 MB avail
                  64 active+clean

[[[ After creating the client node ]]]
ubuntu@admin:~/ceph$ ceph-deploy mds create node1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ubuntu/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.32): /usr/bin/ceph-deploy mds create node1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f1b87d3e170>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mds at 0x7f1b87f9e230>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  mds                           : [('node1', 'node1')]
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph hosts node1:node1
[node1][DEBUG ] connection detected need for sudo
[node1][DEBUG ] connected to host: node1
[node1][DEBUG ] detect platform information from remote host
[node1][DEBUG ] detect machine type
[ceph_deploy.mds][INFO  ] Distro info: Ubuntu 16.04 xenial
[ceph_deploy.mds][DEBUG ] remote host will use systemd
[ceph_deploy.mds][DEBUG ] deploying mds bootstrap to node1
[node1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[node1][DEBUG ] create path if it doesn't exist
[node1][INFO  ] Running command: sudo ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node1 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-node1/keyring
[node1][INFO  ] Running command: sudo systemctl enable ceph-mds@node1
[node1][WARNIN] Created symlink from /etc/systemd/system/ceph-mds.target.wants/ceph-mds@node1.service to /lib/systemd/system/ceph-mds@.service.
[node1][INFO  ] Running command: sudo systemctl start ceph-mds@node1
[node1][INFO  ] Running command: sudo systemctl enable ceph.target

ubuntu@node1:~$ sudo ceph osd pool create cephfs_data 128
pool 'cephfs_data' created

ubuntu@node1:~$ sudo ceph osd pool create cephfs_metadata 128
pool 'cephfs_metadata' created

ubuntu@node1:~$ sudo ceph fs new cephfs cephfs_metadata cephfs_data
new fs with metadata pool 2 and data pool 1

ubuntu@node1:~$ sudo ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]

ubuntu@node1:~$ sudo ceph mds stat
e5: 1/1/1 up {1:0=node1=up:active}

ubuntu@client:~$ sudo apt-get install ceph-fs-common ceph-fuse
Reading package lists... Done
Building dependency tree
Reading state information... Done
Suggested packages:
  ceph-mds
The following NEW packages will be installed:
  ceph-fs-common ceph-fuse
0 upgraded, 2 newly installed, 0 to remove and 0 not upgraded.
Need to get 1820 kB of archives.
After this operation, 6870 kB of additional disk space will be used.

root@client:~# ssh ubuntu@node1 "sudo ceph-authtool -p /etc/ceph/ceph.client.admin.keyring" > admin.key

The authenticity of host 'node1 (172.24.4.237)' can't be established.

ECDSA key fingerprint is SHA256:cru0V5GhyWfldZkD3A7uVsn7I+/gPQc6E7T+Lqkv3i8.

Are you sure you want to continue connecting (yes/no)? yes

Warning: Permanently added 'node1,172.24.4.237' (ECDSA) to the list of known hosts.

ubuntu@node1's password:


root@client:~# ls

admin.key


root@client:~# chmod 600 admin.key


root@client:~# mount -t ceph node1:6789:/ /mnt -o name=admin,secretfile=admin.key


root@client:~# df -h

Filesystem           Size  Used Avail Use% Mounted on

udev                 984M     0  984M   0% /dev

tmpfs                201M  3.2M  197M   2% /run

/dev/vda5            9.3G  1.2G  7.7G  13% /

tmpfs               1001M     0 1001M   0% /dev/shm

tmpfs                5.0M     0  5.0M   0% /run/lock

tmpfs               1001M     0 1001M   0% /sys/fs/cgroup

/dev/vda1            464M   52M  385M  12% /boot

tmpfs                201M     0  201M   0% /run/user/1000

172.24.4.237:6789:/   90G   16G   75G  17% /mnt


root@client:/mnt# df -hT

Filesystem          Type      Size  Used Avail Use% Mounted on

udev                devtmpfs  984M     0  984M   0% /dev

tmpfs               tmpfs     201M  3.2M  197M   2% /run

/dev/vda5           ext4      9.3G  1.2G  7.7G  13% /

tmpfs               tmpfs    1001M     0 1001M   0% /dev/shm

tmpfs               tmpfs     5.0M     0  5.0M   0% /run/lock

tmpfs               tmpfs    1001M     0 1001M   0% /sys/fs/cgroup

/dev/vda1           ext4      464M   52M  385M  12% /boot

tmpfs               tmpfs     201M     0  201M   0% /run/user/1000

172.24.4.237:6789:/ ceph       90G   16G   75G  17% /mnt
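Because ceph-fuse was installed on the client as well, a FUSE mount is an alternative to the kernel client; a sketch, assuming ceph.conf and the admin keyring have been copied to /etc/ceph on the client:

mkdir -p /mnt/cephfs
ceph-fuse -m node1:6789 /mnt/cephfs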


root@client:~# cat /etc/fstab

node1:6789:/    /mnt    ceph    name=admin,secretfile=/root/admin.key,noatime  0 2
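On systems where this mount races network bring-up at boot, adding _netdev (and skipping fsck with pass 0) is a common variant of the same entry:

node1:6789:/    /mnt    ceph    name=admin,secretfile=/root/admin.key,noatime,_netdev  0 0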


root@admin:~# ceph osd tree

ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY

-1 0.11719 root default

-2 0.05859     host node1

 0 0.02930         osd.0       up  1.00000          1.00000

 3 0.02930         osd.3       up  1.00000          1.00000

-3 0.02930     host node2

 1 0.02930         osd.1       up  1.00000          1.00000

-4 0.02930     host node3

 2 0.02930         osd.2       up  1.00000          1.00000

root@admin:~# ceph osd out 3

marked out osd.3.


ubuntu@admin:~$ sudo ceph osd tree
ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 0.11719 root default
-2 0.05859     host node1
 0 0.02930         osd.0       up  1.00000          1.00000
 3 0.02930         osd.3       up        0          1.00000
-3 0.02930     host node2
 1 0.02930         osd.1       up  1.00000          1.00000
-4 0.02930     host node3
 2 0.02930         osd.2       up  1.00000          1.00000

ubuntu@admin:~$ sudo ceph osd in 3

marked in osd.3.
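Marking an OSD out (and back in) triggers data migration; the rebalancing can be watched until the cluster settles back to HEALTH_OK (a sketch, not part of the original log):

ceph -w        # stream cluster events while placement groups move
ceph health    # should return HEALTH_OK once recovery finishes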