RHCS 2.4 was released on 6th September 2017.
This blog is based on official RHCS installation guide 

1. Prerequisites

1] Adjusting PID count (on OSD servers)
/etc/sysctl.conf
kernel.pid_max = 4194303
# sysctl -p
# sysctl -a | grep kernel.pid_max
=== TIP ===
for i in $(cat /tmp/ceph.hosts)
do ssh "$i" 'sysctl -a | grep pid_max'
done
2] ENABLING REPOSITORIES
subscription-manager repos --enable=rhel-7-server-rpms
subscription-manager repos --enable=rhel-7-server-extras-rpms
subscription-manager repos --enable=rhel-7-server-optional-rpms
subscription-manager repos --enable=rhel-7-server-rhceph-2-mon-rpms
subscription-manager repos --enable=rhel-7-server-rhceph-2-osd-rpms
subscription-manager repos --enable=rhel-7-server-rhceph-2-tools-rpms
subscription-manager repos --enable=rhel-7-server-rhscon-2-agent-rpms
subscription-manager repos --enable=rhel-7-server-rhscon-2-installer-rpms
subscription-manager repos --enable=rhel-7-server-rhscon-2-main-rpms
mount /dev/sr0 /mnt/rhcs2
mount /dev/sr1 /mnt/rhscon2
mount -t iso9660 -o loop path/to/image.iso /mnt/iso
# cat /etc/yum.repos.d/ceph.repo
[rhcs2-mon]
name=rhcs-mon
baseurl=file:///mnt/rhcs2/MON
enabled=1
gpgcheck=0
[rhcs2-osd]
name=rhcs2-osd
baseurl=file:///mnt/rhcs2/OSD
enabled=1
gpgcheck=0
[rhcs2-tools]
name=rhcs2-tools
baseurl=file:///mnt/rhcs2/Tools
enabled=1
gpgcheck=0
[rhscon2-agent]
name=rhscon2-agent
baseurl=file:///mnt/rhscon2/Agent
enabled=1
gpgcheck=0
[rhscon2-installer]
name=rhscon2-installer
baseurl=file:///mnt/rhscon2/Installer
enabled=1
gpgcheck=0
[rhscon2-main]
name=rhscon2-main
baseurl=file:///mnt/rhscon2/Main
enabled=1
gpgcheck=0

3] CONFIGURING FIREWALL (if necessary)
[1] MON
systemctl enable firewalld
systemctl start firewalld
firewall-cmd --zone=public --add-port=6789/tcp
firewall-cmd --zone=public --add-port=6789/tcp --permanent
firewall-cmd --zone=public --add-port=8002/tcp
firewall-cmd --zone=public --add-port=8002/tcp --permanent
[2] OSD
systemctl enable firewalld
systemctl start firewalld
firewall-cmd --zone=public --add-port=6800-7300/tcp
firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent
[3] RGW
systemctl enable firewalld
systemctl start firewalld
firewall-cmd --zone=public --add-port=7480/tcp
firewall-cmd --zone=public --add-port=7480/tcp --permanent
[4] CONSOLE
systemctl enable firewalld
systemctl start firewalld
firewall-cmd --zone=public --add-port=80/tcp
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --zone=public --add-port=10443/tcp
firewall-cmd --zone=public --add-port=10443/tcp --permanent
firewall-cmd --zone=public --add-port=8080/tcp
firewall-cmd --zone=public --add-port=8080/tcp --permanent
firewall-cmd --zone=public --add-port=8181/tcp
firewall-cmd --zone=public --add-port=8181/tcp --permanent
firewall-cmd --zone=public --add-port=4505-4506/tcp
firewall-cmd --zone=public --add-port=4505-4506/tcp --permanent
firewall-cmd --zone=public --add-port=10080/tcp
firewall-cmd --zone=public --add-port=10080/tcp --permanent
firewall-cmd --zone=public --add-port=2003/tcp
firewall-cmd --zone=public --add-port=2003/tcp --permanent
firewall-cmd --zone=public --add-service=ceph-installer
firewall-cmd --zone=public --add-service=ceph-installer --permanent
firewall-cmd --zone=public --add-port=443/tcp
firewall-cmd --zone=public --add-port=443/tcp --permanent

4] CONFIGURING NETWORK TIME PROTOCOL(NTP)
yum -y install ntp
systemctl enable ntpd
systemctl start ntpd
ntpq -p
5] CREATING AN ANSIBLE USER (ANSIBLE DEPLOYMENT ONLY)
Create a new Ansible user and set a new password for this user (on all nodes)
# useradd <username>
# passwd <username>
# cat << EOF >/etc/sudoers.d/<username>
<username> ALL = (root) NOPASSWD:ALL
EOF
# chmod 0440 /etc/sudoers.d/<username>

2. INSTALLING RED HAT CEPH STORAGE USING ANSIBLE


1] Create Ansible user and allow sudo permissions
# useradd ansible
# echo ansible | passwd --stdin ansible
# echo "ansible ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/ansible
# su ansible
# ssh-keygen (by ansible user)
# for i in $(cat /tmp/ceph.hosts)
do
ssh-copy-id $i
done
2] INSTALLING RED HAT CEPH STORAGE USING ANSIBLE
Install the ceph-ansible package on admin node
# yum install ceph-ansible  (from rhscon2-installer repo)
3] Add the Ceph hosts to the /etc/ansible/hosts file
# cat /etc/ansible/hosts
[osds]
osd[1:4]
[mons]
mon1
osd1
admin
[clients]
client01
nova-client01
4] Configuring Ceph Global Settings
# cd ~
# mkdir ceph-ansible-keys
5] As root, create a symbolic link to the Ansible group_vars directory in the /etc/ansible/ directory:
# ln -s /usr/share/ceph-ansible/group_vars /etc/ansible/group_vars
6] edit all.yml osds.yml mons.yml
7] edit /usr/share/ceph-ansible/ansible.cfg  to add
retry_files_save_path = ~/
8] As root, create a site.yml file from the /usr/share/ceph-ansible/site.yml.sample file:
cp site.yml.sample site.yml
9] As root, on the Ceph Monitor nodes, create a Calamari user:
calamari-ctl add_user --password <password> --email <email_address> <user_name>
10] deploy
cd /usr/share/ceph-ansible
# ansible-playbook site.yml

3. INSTALLING RED HAT STORAGE CONSOLE

yum install rhscon-core rhscon-ceph rhscon-ui (on console node)
skyring-setup (on console node)
yum install -y rhscon-agent (on MON and OSD nodes)
curl <console_node>:8181/setup/agent/ | bash (on MON and OSD nodes)
yum install calamari-server (on one of MON node)
calamari-ctl clear --yes-i-am-sure (on console node)
calamari-ctl initialize --admin-username admin --admin-password admin --admin-email admin@redhat.com (on console node)