entrypoint.sh

#!/bin/bash
set -e

: ${CLUSTER:=ceph}
: ${RGW_NAME:=$(hostname -s)}
: ${MON_NAME:=$(hostname -s)}
: ${RGW_CIVETWEB_PORT:=80}
: ${OSD_SIZE:=100}

: ${KEYSTONE_ADMIN_TOKEN:=admin}
: ${KEYSTONE_ADMIN_PORT:=35357}
: ${KEYSTONE_PUBLIC_PORT:=5001}
: ${KEYSTONE_SERVICE:=${CLUSTER}}
: ${KEYSTONE_ENDPOINT_REGION:=region}
: ${KEYSTONE_ADMIN_USER:=admin}
: ${KEYSTONE_ADMIN_TENANT:=admin}
: ${KEYSTONE_ADMIN_PASS:=admin}

ip_address=$(head -n1 /etc/hosts | cut -d" " -f1)
: ${MON_IP:=${ip_address}}
subnet=$(ip route | grep "src ${ip_address}" | cut -d" " -f1)
: ${CEPH_NETWORK:=${subnet}}
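
# All of the defaults above can be overridden from the environment at
# container start. A minimal sketch (the image name here is hypothetical):
#
#   docker run -e MON_IP=192.168.0.20 -e CEPH_NETWORK=192.168.0.0/24 ceph-demo
#
# When unset, MON_IP falls back to the first address in /etc/hosts and
# CEPH_NETWORK to the matching subnet from the routing table, which assumes
# the container's first hosts entry is its own address.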

#######
# MON #
#######

if [ -z "$CEPH_NETWORK" ]; then
  echo "ERROR- CEPH_NETWORK must be defined as the name of the network for the OSDs"
  exit 1
fi

if [ -z "$MON_IP" ]; then
  echo "ERROR- MON_IP must be defined as the IP address of the monitor"
  exit 1
fi

# bootstrap MON
# (check the same path we write below; the original tested ceph.conf, which
# breaks when CLUSTER is not "ceph")
if [ ! -e /etc/ceph/${CLUSTER}.conf ]; then
  fsid=$(uuidgen)
  cat <<ENDHERE >/etc/ceph/${CLUSTER}.conf
[global]
fsid = $fsid
mon initial members = ${MON_NAME}
mon host = ${MON_IP}
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd crush chooseleaf type = 0
osd journal size = 100
osd pool default pg num = 8
osd pool default pgp num = 8
osd pool default size = 1
public network = ${CEPH_NETWORK}
cluster network = ${CEPH_NETWORK}
debug ms = 1

[mon]
debug mon = 20
debug paxos = 20
debug auth = 20

[osd]
debug osd = 20
debug filestore = 20
debug journal = 20
debug monc = 20

[mds]
debug mds = 20
debug mds balancer = 20
debug mds log = 20
debug mds migrator = 20

[client.radosgw.gateway]
rgw keystone url = http://${MON_IP}:${KEYSTONE_ADMIN_PORT}
rgw keystone admin token = ${KEYSTONE_ADMIN_TOKEN}
rgw keystone accepted roles = _member_
ENDHERE

  # Generate administrator key
  ceph-authtool /etc/ceph/${CLUSTER}.client.admin.keyring --create-keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'

  # Generate the mon. key
  ceph-authtool /etc/ceph/${CLUSTER}.mon.keyring --create-keyring --gen-key -n mon. --cap mon 'allow *'

  # Generate initial monitor map
  monmaptool --create --add ${MON_NAME} ${MON_IP} --fsid ${fsid} /etc/ceph/monmap
fi

# If we don't have a monitor keyring, this is a new monitor
if [ ! -e /var/lib/ceph/mon/${CLUSTER}-${MON_NAME}/keyring ]; then
  if [ ! -e /etc/ceph/${CLUSTER}.client.admin.keyring ]; then
    echo "ERROR- /etc/ceph/${CLUSTER}.client.admin.keyring must exist; get it from your existing mon"
    exit 2
  fi

  if [ ! -e /etc/ceph/${CLUSTER}.mon.keyring ]; then
    echo "ERROR- /etc/ceph/${CLUSTER}.mon.keyring must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o /tmp/${CLUSTER}.mon.keyring'"
    exit 3
  fi

  if [ ! -e /etc/ceph/monmap ]; then
    echo "ERROR- /etc/ceph/monmap must exist. You can extract it from your current monitor by running 'ceph mon getmap -o /tmp/monmap'"
    exit 4
  fi

  # Import the client.admin keyring and the monitor keyring into a new, temporary one
  ceph-authtool /tmp/${CLUSTER}.mon.keyring --create-keyring --import-keyring /etc/ceph/${CLUSTER}.client.admin.keyring
  ceph-authtool /tmp/${CLUSTER}.mon.keyring --import-keyring /etc/ceph/${CLUSTER}.mon.keyring

  # Make the monitor directory
  mkdir -p /var/lib/ceph/mon/${CLUSTER}-${MON_NAME}

  # Prepare the monitor daemon's directory with the map and keyring
  ceph-mon --mkfs -i ${MON_NAME} --monmap /etc/ceph/monmap --keyring /tmp/${CLUSTER}.mon.keyring

  # Clean up the temporary key
  rm /tmp/${CLUSTER}.mon.keyring
fi

# start MON
ceph-mon -i ${MON_NAME} --public-addr ${MON_IP}:6789

# change replica size
ceph osd pool set rbd size 1
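
# The monitor should now answer status queries; a quick sanity check
# (assuming the default cluster name) is:
#
#   ceph -s
#
# The explicit `ceph osd pool set rbd size 1` above keeps the pre-created
# rbd pool healthy with the single OSD this demo runs.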

#######
# OSD #
#######

if [ ! -e /var/lib/ceph/osd/${CLUSTER}-0/keyring ]; then
  # bootstrap OSD
  mkdir -p /var/lib/ceph/osd/${CLUSTER}-0

  # skip btrfs HACK if btrfs is already in place
  if [ "$(stat -f /var/lib/ceph/osd/${CLUSTER}-0 2>/dev/null | grep btrfs | wc -l)" == "0" ]; then
    # HACK create btrfs loopback device
    echo "creating osd storage image"
    dd if=/dev/zero of=/tmp/osddata bs=1M count=${OSD_SIZE}
    mkfs.btrfs /tmp/osddata
    echo "mounting via loopback"
    mount -o loop /tmp/osddata /var/lib/ceph/osd/${CLUSTER}-0
    echo "now mounted:"
    mount
    # end HACK
  fi

  echo "creating osd"
  ceph osd create

  echo "creating osd filesystem"
  ceph-osd -i 0 --mkfs

  echo "creating osd keyring"
  ceph auth get-or-create osd.0 osd 'allow *' mon 'allow profile osd' -o /var/lib/ceph/osd/${CLUSTER}-0/keyring

  echo "configuring osd crush"
  ceph osd crush add 0 1 root=default host=$(hostname -s)

  echo "adding osd keyring"
  ceph-osd -i 0 -k /var/lib/ceph/osd/${CLUSTER}-0/keyring
fi

# start OSD
echo "starting osd"
ceph-osd --cluster=${CLUSTER} -i 0
#sleep 10
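
# With the OSD up and in, placement and capacity can be inspected with, for
# example:
#
#   ceph osd tree
#   ceph df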

#######
# MDS #
#######

if [ ! -e /var/lib/ceph/mds/${CLUSTER}-0/keyring ]; then
  # create ceph filesystem
  echo "creating osd pool cephfs_data"
  ceph osd pool create cephfs_data 8

  echo "creating osd pool cephfs_metadata"
  ceph osd pool create cephfs_metadata 8

  echo "creating cephfs"
  ceph fs new cephfs cephfs_metadata cephfs_data

  # bootstrap MDS
  mkdir -p /var/lib/ceph/mds/${CLUSTER}-0
  echo "creating mds auth"
  ceph auth get-or-create mds.0 mds 'allow' osd 'allow *' mon 'allow profile mds' > /var/lib/ceph/mds/${CLUSTER}-0/keyring
fi

# start MDS
echo "starting mds"
ceph-mds --cluster=${CLUSTER} -i 0
#sleep 10
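
# Once the MDS is active, CephFS can be mounted from a client with the
# kernel driver; <admin-key> is a placeholder for the key stored in the
# admin keyring generated above:
#
#   mount -t ceph ${MON_IP}:6789:/ /mnt/cephfs -o name=admin,secret=<admin-key>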

#######
# RGW #
#######

if [ ! -e /var/lib/ceph/radosgw/${RGW_NAME}/keyring ]; then
  # bootstrap RGW
  mkdir -p /var/lib/ceph/radosgw/${RGW_NAME}
  echo "creating rgw auth"
  ceph auth get-or-create client.radosgw.gateway osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/${RGW_NAME}/keyring
fi

# start RGW
# (use the cluster-named conf written above, not a hardcoded ceph.conf)
echo "starting rgw"
radosgw -c /etc/ceph/${CLUSTER}.conf -n client.radosgw.gateway -k /var/lib/ceph/radosgw/${RGW_NAME}/keyring --rgw-socket-path="" --rgw-frontends="civetweb port=${RGW_CIVETWEB_PORT}"
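
# radosgw should now be listening on the civetweb port; any HTTP response
# to a plain request is enough for a liveness check, e.g.:
#
#   curl -i http://localhost:${RGW_CIVETWEB_PORT}/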

#######
# API #
#######

# start ceph-rest-api
echo "starting rest api"
ceph-rest-api -n client.admin &
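
# ceph-rest-api exposes most `ceph` CLI queries over HTTP (by default on
# port 5000); assuming the default base URL, a status call looks like:
#
#   curl http://localhost:5000/api/v0.1/status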

############
# Keystone #
############

if [ ! -e /etc/keystone/${CLUSTER}.conf ]; then
  cat <<ENDHERE > /etc/keystone/${CLUSTER}.conf
[DEFAULT]
admin_token=${KEYSTONE_ADMIN_TOKEN}
admin_port=${KEYSTONE_ADMIN_PORT}
public_port=${KEYSTONE_PUBLIC_PORT}

[database]
connection = sqlite:////var/lib/keystone/keystone.db
ENDHERE

  # start Keystone
  echo "starting keystone"
  keystone-all --config-file /etc/keystone/${CLUSTER}.conf &

  # wait until up
  while ! nc ${MON_IP} ${KEYSTONE_ADMIN_PORT} </dev/null; do
    sleep 1
  done

  export OS_SERVICE_TOKEN=${KEYSTONE_ADMIN_TOKEN}
  export OS_SERVICE_ENDPOINT=http://${MON_IP}:${KEYSTONE_ADMIN_PORT}/v2.0

  echo "creating keystone service ${KEYSTONE_SERVICE}"
  keystone service-create --name ${KEYSTONE_SERVICE} --type object-store

  echo "creating keystone endpoint ${KEYSTONE_SERVICE}"
  keystone endpoint-create --service ${KEYSTONE_SERVICE} \
    --region ${KEYSTONE_ENDPOINT_REGION} \
    --publicurl http://${MON_IP}:${RGW_CIVETWEB_PORT}/swift/v1 \
    --internalurl http://${MON_IP}:${RGW_CIVETWEB_PORT}/swift/v1 \
    --adminurl http://${MON_IP}:${RGW_CIVETWEB_PORT}/swift/v1

  echo "creating keystone user ${KEYSTONE_ADMIN_USER}"
  keystone user-create --name=${KEYSTONE_ADMIN_USER} --pass=${KEYSTONE_ADMIN_PASS} --email=dev@null.com

  echo "creating keystone tenant ${KEYSTONE_ADMIN_TENANT}"
  keystone tenant-create --name=${KEYSTONE_ADMIN_TENANT} --description=admin

  echo "adding keystone role _member_"
  keystone user-role-add --user=${KEYSTONE_ADMIN_USER} --tenant=${KEYSTONE_ADMIN_TENANT} --role=_member_

  echo "creating keystone role admin"
  keystone role-create --name=admin

  echo "adding keystone role admin"
  keystone user-role-add --user=${KEYSTONE_ADMIN_USER} --tenant=${KEYSTONE_ADMIN_TENANT} --role=admin
fi
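
# End-to-end check: authenticate against Keystone and hit the Swift endpoint
# registered above, e.g. with python-swiftclient and the default demo
# credentials (admin/admin/admin):
#
#   swift -V 2.0 -A http://${MON_IP}:${KEYSTONE_ADMIN_PORT}/v2.0 \
#       -U admin:admin -K admin stat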

#########
# WATCH #
#########

echo "watching ceph"
exec ceph -w