[root@controller2 ~]# tail -f /var/log/keystone/keystone-startup.log
_setup_logging_from_conf(product_name, version)
File "/usr/lib/python2.6/site-packages/keystone/openstack/common/log.py", line 525, in _setup_logging_from_conf
filelog = logging.handlers.WatchedFileHandler(logpath)
File "/usr/lib64/python2.6/logging/handlers.py", line 377, in __init__
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
File "/usr/lib64/python2.6/logging/__init__.py", line 827, in __init__
StreamHandler.__init__(self, self._open())
File "/usr/lib64/python2.6/logging/__init__.py", line 846, in _open
stream = open(self.baseFilename, self.mode)
IOError: [Errno 13] Permission denied: '/var/log/keystone/keystone.log'
The fix is to give the keystone user ownership of its log file:
chown keystone:keystone /var/log/keystone/keystone.log
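If the whole log directory ended up owned by root, the same error will recur for other files under it; a hedged follow-up using the paths from the traceback above:
ls -l /var/log/keystone/
chown -R keystone:keystone /var/log/keystone
service openstack-keystone restart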
Saturday, June 28, 2014
Thursday, June 26, 2014
Virtual IP with Keepalived as front end for HAProxy servers
Install Keepalived
Virtual IP: 192.168.216.100
HAProxy IP: 192.168.216.101
1. Install Keepalived package:
On RHEL/CentOS:
$ yum install -y centos-release
$ yum install -y keepalived
$ chkconfig keepalived on
2. Tell the kernel to allow binding a non-local IP on the hosts, and apply the change:
$ echo "net.ipv4.ip_nonlocal_bind = 1" >> /etc/sysctl.conf
$ sysctl -p
Configure Keepalived and Virtual IP
1. Log in to LB1 and add the following lines to /etc/keepalived/keepalived.conf:
vrrp_script chk_haproxy {
script "killall -0 haproxy" # verify the pid existence
interval 2 # check every 2 seconds
weight 2 # add 2 points of prio if OK
}
vrrp_instance VI_1 {
interface eth2 # interface to monitor
state MASTER
virtual_router_id 51 # Assign one ID for this router
priority 101 # 101 on master, 100 on backup
virtual_ipaddress {
192.168.216.100 # the virtual IP
}
track_script {
chk_haproxy
}
}
2. Log in to LB2 and add the following lines to /etc/keepalived/keepalived.conf:
vrrp_script chk_haproxy {
script "killall -0 haproxy" # verify the pid existence
interval 2 # check every 2 seconds
weight 2 # add 2 points of prio if OK
}
vrrp_instance VI_1 {
interface eth2 # interface to monitor
state BACKUP # LB2 runs as the backup
virtual_router_id 51 # Assign one ID for this router
priority 100 # 101 on master, 100 on backup
virtual_ipaddress {
192.168.216.100 # the virtual IP
}
track_script {
chk_haproxy
}
}
3. Start Keepalived on both nodes:
$ sudo /etc/init.d/keepalived start
4. Verify the Keepalived status. LB1 should hold the VIP and the MASTER state, while LB2 should stay in the BACKUP state without the VIP:
LB1 IP:
$ ip a | grep -e inet.*eth2
inet 192.168.216.101/24 brd 192.168.216.255 scope global eth2
inet 192.168.216.100/32 scope global eth2
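To confirm failover actually works, stop Keepalived on LB1 and check that the VIP moves to LB2 (interface name and addresses assumed from the configuration above):
$ sudo /etc/init.d/keepalived stop   # on LB1
$ ip a | grep -e inet.*eth2          # on LB2; the VIP should now appear
inet 192.168.216.100/32 scope global eth2
$ sudo /etc/init.d/keepalived start  # on LB1; the VIP moves back to the higher-priority node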
MySQL HA with HAProxy and Master-Master replication
Once MySQL master-master replication is in place, we add HA on top of it using HAProxy.
http://enekumvenamorublog.wordpress.com/2014/06/25/mysql-replication-master-master/
HAProxy
yum install haproxy
To use HAProxy with MySQL, we need to create a MySQL user that HAProxy can use to access the servers (it is referenced by the mysql-check option below).
GRANT ALL PRIVILEGES ON *.* TO 'haproxy'@'192.168.216.180' IDENTIFIED BY '';
Sample configuration /etc/haproxy/haproxy.cfg
global
log 127.0.0.1 local0 notice
user haproxy
group haproxy
# turn on stats unix socket
stats socket /var/lib/haproxy/stats mode 777
defaults
log global
retries 2
timeout connect 1000
timeout server 5000
timeout client 5000
listen stats 192.168.255.180:80
mode http
stats enable
stats uri /stats
stats realm HAProxy\ Statistics
stats auth admin:password
listen MYSQL 192.168.255.190:3306
balance source
mode tcp
option mysql-check user haproxy
server controller1 192.168.216.130 check
server controller2 192.168.216.135 check
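After restarting HAProxy, one way to confirm the setup is to connect through the listener address and see which backend's server_id answers (addresses taken from the sample configuration above; the haproxy user was created with an empty password):
service haproxy restart
mysql -h 192.168.255.190 -P 3306 -u haproxy -e "show variables like 'server_id'"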
Wednesday, June 25, 2014
Enable HAProxy logging on CentOS
By default, HAProxy will not log to files unless we make some modifications
1. Create rsyslog configuration file
nano /etc/rsyslog.d/haproxy.conf
Add these lines to the file
# Enable UDP port 514 to listen to incoming log messages from haproxy
$ModLoad imudp
$UDPServerRun 514
$template Haproxy,"%msg%\n"
local0.=info -/var/log/haproxy/haproxy.log;Haproxy
local0.notice -/var/log/haproxy/admin.log;Haproxy
# don't log anywhere else
local0.* ~
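rsyslog will create the log files but not the /var/log/haproxy directory itself, so create it first if it does not already exist (path assumed from the rules above):
mkdir -p /var/log/haproxy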
Restart rsyslog service
/etc/init.d/rsyslog restart
Ref: http://blog.hintcafe.com/post/33689067443/haproxy-logging-with-rsyslog-on-linux
2. Modify the log rotate config to match the new folder:
nano /etc/logrotate.d/haproxy
Change
/var/log/haproxy.log {
daily
rotate 10
missingok
[...]
to
/var/log/haproxy/*.log {
daily
rotate 10
missingok
[...]
Now we can check if HAProxy logging is working.
tail -f /var/log/haproxy/haproxy.log
===================================================
Alternatively (as suggested in the comments of the stock haproxy.cfg global section), to have these messages end up in /var/log/haproxy.log you will need to:
1) configure syslog to accept network log events. This is done by adding the '-r' option to the SYSLOGD_OPTIONS in /etc/sysconfig/syslog
2) configure local2 events to go to the /var/log/haproxy.log file. A line like the following can be added to /etc/sysconfig/syslog
local2.* /var/log/haproxy.log
In haproxy conf file add
log 127.0.0.1 local2 info
MySQL replication: Master-Master
MySQL Master-Master replication.
Master-1 my.cnf configuration (create the log directory first with mkdir /var/lib/mysql/log/):
log-bin=/var/lib/mysql/log/mysql-bin
log_warnings
log_slow_queries = /var/lib/mysql/log/slow.log
long_query_time = 5
log_long_format
tmpdir = /tmp
server-id = 1
log_slave_updates
replicate-same-server-id = 0
auto_increment_increment = 10
auto_increment_offset = 1
relay-log = mysql-relay-bin
Master-2 my.cnf configuration (again, create the log directory first with mkdir /var/lib/mysql/log/):
log-bin=/var/lib/mysql/log/mysql-bin
log_warnings
log_slow_queries = /var/lib/mysql/log/slow.log
long_query_time = 5
log_long_format
tmpdir = /tmp
server-id = 2
replicate-same-server-id = 0
auto_increment_increment = 2
auto_increment_offset = 2
relay-log = mysql-relay-bin
First, set up Master-1 as master and Master-2 as slave for Master-1:
Follow the steps below.
Step 1: On Master-1, grant replication privileges to the account Master-2 will use:
grant replication slave on *.* to 'root'@'192.168.216.135' identified by 'admin';
Step 2: show master status;
This shows the binary log file name and position; use these values on Master-2 to configure it as a slave for Master-1.
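The output looks roughly like this (illustrative; the file name and position are the values reused in the CHANGE MASTER command below):
mysql> show master status;
+------------------+----------+--------------+------------------+
| File             | Position | Binlog_Do_DB | Binlog_Ignore_DB |
+------------------+----------+--------------+------------------+
| mysql-bin.000001 |      106 |              |                  |
+------------------+----------+--------------+------------------+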
Step 3: Now log on to master-2 and run the below query:
CHANGE MASTER TO MASTER_HOST='192.168.216.130', MASTER_USER='root',MASTER_PASSWORD='admin', MASTER_LOG_FILE='mysql-bin.000001',MASTER_LOG_POS=106;
Step 4: start slave
Step 5: show slave status \G
In this output, the following two fields should read as follows:
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
These two fields indicate the replication status; if both show "Yes", replication is running successfully.
Next, set up Master-2 as master and Master-1 as slave for Master-2:
Step 1: On Master-2, grant replication privileges to the account Master-1 will use:
grant replication slave on *.* to 'root'@'192.168.216.130' identified by 'admin';
Step 2: mysql> show master status;
Step 3: Now log on to master-1 and run the below query:
CHANGE MASTER TO MASTER_HOST='192.168.216.135', MASTER_USER='root',MASTER_PASSWORD='admin', MASTER_LOG_FILE='mysql-bin.000001',MASTER_LOG_POS=346;
Step 4: start slave
Step 5: show slave status \G
The following parameters should show "Yes", indicating that replication is running successfully:
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
On both servers “slave_IO_Running” and “slave_SQL_Running” parameters should always be “Yes” for successful Master-Master Replication.
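A simple end-to-end check, assuming both slaves report Yes: create a throwaway database on one master and confirm it appears on the other (the database name is illustrative).
# On Master-1:
mysql -u root -p -e "create database repl_test;"
# On Master-2:
mysql -u root -p -e "show databases like 'repl_test';"
# Clean up (the drop replicates as well):
mysql -u root -p -e "drop database repl_test;"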
Tuesday, June 24, 2014
Openstack Live Migration failure: operation failed
Error seen in the nova-compute log:
2014-06-25 01:32:50.752 2703 ERROR nova.virt.libvirt.driver [-]
[instance: fc118bff-77a3-4300-ab27-371a314b819f] Live Migration failure: operation failed: Failed to connect to remote libvirt URI qemu+tcp://compute2/system
Try updating the libvirt configuration in /etc/libvirt/libvirtd.conf (the file's comments describe all of the available options). Change the following settings:
before : #listen_tls = 0
after : listen_tls = 0
before : #listen_tcp = 1
after : listen_tcp = 1
add: auth_tcp = "none"
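On RHEL/CentOS 6, libvirtd also has to be started with --listen for the TCP listener to be enabled at all, and it must be restarted afterwards; a hedged sketch of the remaining steps (compute2 is the destination host from the error above):
# /etc/sysconfig/libvirtd
LIBVIRTD_ARGS="--listen"
service libvirtd restart
# verify from the source compute node:
virsh -c qemu+tcp://compute2/system list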
OpenStack + Shared Storage (NFS) + Permission denied
While setting up OpenStack with shared storage, if you get the following error while creating an instance, first ensure that the server is in permissive SELinux mode with
getenforce
and if that doesn't help, try giving 755 permissions to the mounted directory, here /var/lib/nova.
2014-06-24 18:58:20.642 5119 TRACE nova.compute.manager [instance: a7996f1f-9af2-4410-8351-139d43f00786] libvirtError: internal error Process exited while reading console log output: qemu-kvm: -chardev file,id=charserial0,path=/var/lib/nova/instances/a7996f1f-9af2-4410-8351-139d43f00786/console.log: Could not open '/var/lib/nova/instances/a7996f1f-9af2-4410-8351-139d43f00786/console.log': Permission denied
Finally, if nothing else works, tell libvirtd/qemu to access the data as the root user:
[root@compute nova]# cat /etc/libvirt/qemu.conf |grep root
user = "root"
#group = "root"
[root@compute nova]#
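After changing qemu.conf, restart libvirt and the compute service so the new user setting takes effect (service names as used elsewhere in this setup):
service libvirtd restart
service openstack-nova-compute restart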
Monday, June 23, 2014
Openstack Icehouse - VNC console not connecting to server
Make sure that the settings on the controller and compute node are correct, double-check the IPs, and replace the host name with the IP where needed.
controller - 192.168.216.130
running:
nova-consoleauth
nova-novncproxy
nova.conf:
novncproxy_host=0.0.0.0
novncproxy_port=6080
novncproxy_base_url=http://192.168.216.130:6080/vnc_auto.html
compute - 192.168.216.140
running:
nova-compute
nova.conf:
vnc_enabled=True
novncproxy_base_url=http://192.168.216.130:6080/vnc_auto.html
vncserver_listen=0.0.0.0
vncserver_proxyclient_address=192.168.216.140
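After editing nova.conf, restart the console services and request a console URL for a running instance to test (the instance name here is illustrative):
# on the controller
service openstack-nova-consoleauth restart
service openstack-nova-novncproxy restart
# on the compute node
service openstack-nova-compute restart
# test
nova get-vnc-console demo-instance1 novnc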
Wednesday, June 18, 2014
Openstack Icehouse Part 6 : Testing the Setup + Horizon
Creating the Key
$ ssh-keygen
Add the public key to your OpenStack environment:
$ nova keypair-add --pub-key ~/.ssh/id_rsa.pub demo-key
Verify addition of the public key:
$ nova keypair-list
+----------+-------------------------------------------------+
| Name | Fingerprint |
+----------+-------------------------------------------------+
| demo-key | 6c:74:ec:3a:08:05:4e:9e:21:22:a6:dd:b2:62:b8:28 |
+----------+-------------------------------------------------+
nova image-list
+--------------------------------------+---------------------+--------+--------+
| ID | Name | Status | Server |
+--------------------------------------+---------------------+--------+--------+
| acafc7c0-40aa-4026-9673-b879898e1fc2 | cirros-0.3.2-x86_64 | ACTIVE | |
+--------------------------------------+---------------------+--------+--------+
Your first instance uses the cirros-0.3.2-x86_64 image.
List available networks:
$ neutron net-list
+--------------------------------------+----------+-------------------------------------------------------+
| id | name | subnets |
+--------------------------------------+----------+-------------------------------------------------------+
| 3c612b5a-d1db-498a-babb-a4c50e344cb1 | demo-net | 20bcd3fd-5785-41fe-ac42-55ff884e3180 192.168.1.0/24 |
| 9bce64a3-a963-4c05-bfcd-161f708042d1 | ext-net | b54a8d85-b434-4e85-a8aa-74873841a90d 203.0.113.0/24 |
+--------------------------------------+----------+-------------------------------------------------------+
Your first instance uses the demo-net tenant network. However, you must reference this network using the ID instead of the name.
List available security groups:
$ nova secgroup-list
+--------------------------------------+---------+-------------+
| Id | Name | Description |
+--------------------------------------+---------+-------------+
| ad8d4ea5-3cad-4f7d-b164-ada67ec59473 | default | default |
+--------------------------------------+---------+-------------+
Launching an instance with an existing flavor
nova boot --flavor m1.tiny --image cirros-0.3.2-x86_64 --nic net-id=8cc217b0-96a6-4e98-a901-a694ebff173f --security-group default --key-name demo-key demo-instance1
Launching the instance from the command line, with --poll to watch its progress
nova boot --poll --flavor m1.tiny --image cirros-0.3.2-x86_64 --nic net-id=69c6ca95-2f5d-4173-8973-164c5129cb27 --security-group default --key-name Chumma demo-instance
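Once the boot completes, the instance should show as ACTIVE with an address on the tenant network (illustrative output; the ID and IP will differ):
$ nova list
+--------------------------------------+----------------+--------+------------+-------------+-------------------+
| ID                                   | Name           | Status | Task State | Power State | Networks          |
+--------------------------------------+----------------+--------+------------+-------------+-------------------+
| 05682b91-81a1-464c-8f40-8b3da7ee92c5 | demo-instance1 | ACTIVE | -          | Running     | demo-net=10.0.0.2 |
+--------------------------------------+----------------+--------+------------+-------------+-------------------+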
Install Horizon on the Controller Node
yum install memcached python-memcached mod_wsgi openstack-dashboard
Edit /etc/openstack-dashboard/local_settings:
ALLOWED_HOSTS = ['localhost', 'my-desktop']
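If the dashboard is not running on the same host as the Identity service, also point it at the controller in the same file (OPENSTACK_HOST is the stock local_settings variable; the value is assumed from this setup):
OPENSTACK_HOST = "controller"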
service httpd start
service memcached start
chkconfig httpd on
chkconfig memcached on
Openstack Icehouse Part 5 : Configuring EXTERNAL NETWORK
To create the external network on controller
source /root/admin-openrc.sh
neutron net-create ext-net --shared --router:external=True
To create a subnet on the external network
neutron subnet-create ext-net --name ext-subnet --allocation-pool start=192.168.255.160,end=192.168.255.180 --disable-dhcp --gateway 192.168.255.2 192.168.255.0/24
#To create the tenant network
source /root/demo-openrc.sh
#Create the network:
neutron net-create demo-net
#To create a subnet on the tenant network
neutron subnet-create demo-net --name demo-subnet --gateway 10.0.0.1 10.0.0.0/24
#Create the router:
neutron router-create demo-router
#Attach the router to the demo tenant subnet:
neutron router-interface-add demo-router demo-subnet
#Added interface b1a894fd-aee8-475c-9262-4342afdc1b58 to router demo-router.
neutron router-gateway-set demo-router ext-net
#Set gateway for router demo-router
Now check whether the gateway of the external network responds; here it will be 192.168.255.160, the first IP of the allocation range. Try pinging that IP. If it does not respond, stop there, remove the routers and the gateway, and redo the steps using the IDs instead of the names. If the gateway cannot be pinged, instances will not be able to reach anything outside the network.
Setting neutron router-interface-add and neutron router-gateway-set by ID:
neutron router-list
+--------------------------------------+-------------+--------------------------------------------------------+
| id | name | external_gateway_info |
+--------------------------------------+-------------+--------------------------------------------------------+
| 020f48d9-182e-4e33-a73f-813333533092 | router-demo | {"network_id": "9a457578-8f85-486b-9cd0-f7f04922ba0c"} |
+--------------------------------------+-------------+--------------------------------------------------------+
# neutron net-list
+--------------------------------------+----------+----------------------------------------------------+
| id | name | subnets |
+--------------------------------------+----------+----------------------------------------------------+
| 07e10f48-0637-46bb-a444-695646e6bd15 | net-demo | c042e65e-3892-45bc-aeb0-625ce5f4aaaf 50.50.1.0/24 |
| 9a457578-8f85-486b-9cd0-f7f04922ba0c | ext_net | 0bcccf59-be17-48c7-8032-e00fd4f15b46 1.2.3.0/24 |
+--------------------------------------+----------+----------------------------------------------------+
#neutron router-gateway-set 020f48d9-182e-4e33-a73f-813333533092 9a457578-8f85-486b-9cd0-f7f04922ba0c
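To verify from the network node: the router lives in a network namespace named after its ID, so you can ping the external gateway from inside it (router ID taken from the listing above; adjust the gateway address to your external network):
ip netns
ip netns exec qrouter-020f48d9-182e-4e33-a73f-813333533092 ip a
ip netns exec qrouter-020f48d9-182e-4e33-a73f-813333533092 ping -c 3 192.168.255.2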
Openstack Icehouse Part 4 Neutron
OpenStack Networking (neutron) Configure controller node
$ mysql -u root -p
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron4mar';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron4mar';
exit
keystone user-create --name neutron --pass neutron4mar --email neutron@example.com
keystone user-role-add --user neutron --tenant service --role admin
keystone service-create --name neutron --type network --description "OpenStack Networking"
keystone endpoint-create --service-id $(keystone service-list | awk '/ network / {print $2}') --publicurl http://controller:9696 --adminurl http://controller:9696 --internalurl http://controller:9696
To install the Networking components
# yum -y install openstack-neutron openstack-neutron-ml2 python-neutronclient
openstack-config --set /etc/neutron/neutron.conf database connection mysql://neutron:neutron4mar@controller/neutron
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_host controller
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_user neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_password neutron4mar
openstack-config --set /etc/neutron/neutron.conf DEFAULT rpc_backend neutron.openstack.common.rpc.impl_qpid
openstack-config --set /etc/neutron/neutron.conf DEFAULT qpid_hostname controller
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes True
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes True
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_url http://controller:8774/v2
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_admin_username nova
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_admin_tenant_id $(keystone tenant-list | awk '/ service / { print $2 }')
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_admin_password nova4mar
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_admin_auth_url http://controller:35357/v2.0
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers openvswitch
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_gre tunnel_id_ranges 1:1000
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_security_group True
openstack-config --set /etc/nova/nova.conf DEFAULT network_api_class nova.network.neutronv2.api.API
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_url http://controller:9696
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_tenant_name service
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_username neutron
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_password neutron4mar
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_auth_url http://controller:35357/v2.0
openstack-config --set /etc/nova/nova.conf DEFAULT linuxnet_interface_driver nova.network.linux_net.LinuxOVSInterfaceDriver
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT security_group_api neutron
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
service openstack-nova-api restart
service openstack-nova-scheduler restart
service openstack-nova-conductor restart
service neutron-server start
chkconfig neutron-server on
Neutron ON NETWORK NODE
Edit /etc/sysctl.conf to contain the following:
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
Implement the changes:
sysctl -p
yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_host controller
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_user neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_password neutron4mar
openstack-config --set /etc/neutron/neutron.conf DEFAULT rpc_backend neutron.openstack.common.rpc.impl_qpid
openstack-config --set /etc/neutron/neutron.conf DEFAULT qpid_hostname controller
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
We recommend adding verbose = True to the [DEFAULT] section in /etc/neutron/neutron.conf to assist with troubleshooting.
Comment out any lines in the [service_providers] section.
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT use_namespaces True
We recommend adding verbose = True to the [DEFAULT] section in /etc/neutron/l3_agent.ini to assist with troubleshooting.
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT use_namespaces True
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT auth_url http://controller:5000/v2.0
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT auth_region regionOne
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT admin_tenant_name service
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT admin_user neutron
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT admin_password neutron4mar
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_ip controller
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret meta4mar
We recommend adding verbose = True to the [DEFAULT] section in /etc/neutron/metadata_agent.ini to assist with troubleshooting.
Perform the next two steps on the controller node.
On the controller node, configure Compute to use the metadata service:
Replace METADATA_SECRET with the secret you chose for the metadata proxy.
openstack-config --set /etc/nova/nova.conf DEFAULT service_neutron_metadata_proxy true
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_metadata_proxy_shared_secret meta4mar
#On the controller node, restart the Compute API service:
service openstack-nova-api restart
To configure the Modular Layer 2 (ML2) plug-in
Replace INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS with the IP address of the instance tunnels network interface on your network node.
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers openvswitch
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_gre tunnel_id_ranges 1:1000
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip 192.168.216.151
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs tunnel_type gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs enable_tunneling True
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_security_group True
service openvswitch start
chkconfig openvswitch on
#Add the integration bridge:
ovs-vsctl add-br br-int
#Add the external bridge:
ovs-vsctl add-br br-ex
#Add a port to the external bridge that connects to the physical external network interface:
#Replace INTERFACE_NAME with the actual interface name. For example, eth2 or ens256.
ovs-vsctl add-port br-ex eth4
Depending on your network interface driver, you may need to disable Generic Receive Offload (GRO) to achieve suitable throughput between your instances and the external network.
To temporarily disable GRO on the external network interface while testing your environment:
# ethtool -K INTERFACE_NAME gro off
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /etc/init.d/neutron-openvswitch-agent /etc/init.d/neutron-openvswitch-agent.orig
sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
service neutron-openvswitch-agent start
service neutron-l3-agent start
service neutron-dhcp-agent start
service neutron-metadata-agent start
chkconfig neutron-openvswitch-agent on
chkconfig neutron-l3-agent on
chkconfig neutron-dhcp-agent on
chkconfig neutron-metadata-agent on
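Back on the controller, the network node's agents should now register; a quick check (illustrative):
source /root/admin-openrc.sh
neutron agent-list
# the Open vSwitch, L3, DHCP and Metadata agents should be listed with alive = :-)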
Neutron Configure compute node
Edit /etc/sysctl.conf to contain the following:
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
Implement the changes:
# sysctl -p
To install the Networking components
yum -y install openstack-neutron-ml2 openstack-neutron-openvswitch
Replace NEUTRON_PASS with the password you chose for the neutron user in the Identity service.
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_host controller
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_user neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_password neutron4mar
openstack-config --set /etc/neutron/neutron.conf DEFAULT rpc_backend neutron.openstack.common.rpc.impl_qpid
openstack-config --set /etc/neutron/neutron.conf DEFAULT qpid_hostname controller
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
Replace INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS with the IP address of the instance tunnels network interface on your compute node; in this setup it is 192.168.216.141, as used below.
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers openvswitch
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_gre tunnel_id_ranges 1:1000
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip 192.168.216.141
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs tunnel_type gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs enable_tunneling True
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_security_group True
service openvswitch start
chkconfig openvswitch on
#Add the integration bridge:
ovs-vsctl add-br br-int
Replace NEUTRON_PASS with the password you chose for the neutron user in the Identity service.
openstack-config --set /etc/nova/nova.conf DEFAULT network_api_class nova.network.neutronv2.api.API
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_url http://controller:9696
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_tenant_name service
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_username neutron
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_password neutron4mar
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_auth_url http://controller:35357/v2.0
openstack-config --set /etc/nova/nova.conf DEFAULT linuxnet_interface_driver nova.network.linux_net.LinuxOVSInterfaceDriver
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT security_group_api neutron
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
#Due to a packaging bug, the Open vSwitch agent initialization script explicitly looks for the Open vSwitch plug-in configuration file rather than a symbolic link /etc/neutron/plugin.ini pointing to the ML2 plug-in configuration file. Run the following commands to resolve this issue:
cp /etc/init.d/neutron-openvswitch-agent /etc/init.d/neutron-openvswitch-agent.orig
sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
#Restart the Compute service:
service openstack-nova-compute restart
#Start the Open vSwitch (OVS) agent and configure it to start when the system boots:
service neutron-openvswitch-agent start
chkconfig neutron-openvswitch-agent on
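To confirm the compute node's agent came up, check the integration bridge locally and the agent list from the controller (illustrative):
ovs-vsctl show       # on the compute node; br-int should be present
neutron agent-list   # on the controller; the compute node's Open vSwitch agent should now appear as alive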
Openstack Icehouse Part 3 NOVA
COMPUTE SERVER CONFIGURATION On Controller
yum -y install openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient
openstack-config --set /etc/nova/nova.conf database connection mysql://nova:nova4mar@controller/nova
openstack-config --set /etc/nova/nova.conf DEFAULT rpc_backend qpid
openstack-config --set /etc/nova/nova.conf DEFAULT qpid_hostname controller
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.216.130
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_listen 192.168.216.130
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_proxyclient_address 192.168.216.130
mysql -u root -p
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova4mar';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova4mar';
exit
#Create the Compute service tables:
su -s /bin/sh -c "nova-manage db sync" nova
keystone user-create --name=nova --pass=nova4mar --email=nova@example.com
keystone user-role-add --user=nova --tenant=service --role=admin
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_host controller
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_user nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_password nova4mar
keystone service-create --name=nova --type=compute --description="OpenStack Compute"
keystone endpoint-create --service-id=$(keystone service-list | awk '/ compute / {print $2}') --publicurl=http://controller:8774/v2/%\(tenant_id\)s --internalurl=http://controller:8774/v2/%\(tenant_id\)s --adminurl=http://controller:8774/v2/%\(tenant_id\)s
service openstack-nova-api start
service openstack-nova-cert start
service openstack-nova-consoleauth start
service openstack-nova-scheduler start
service openstack-nova-conductor start
service openstack-nova-novncproxy start
chkconfig openstack-nova-api on
chkconfig openstack-nova-cert on
chkconfig openstack-nova-consoleauth on
chkconfig openstack-nova-scheduler on
chkconfig openstack-nova-conductor on
chkconfig openstack-nova-novncproxy on
nova image-list
Add a rule to the default Nova Security Group to allow SSH access and Ping to instances:
nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
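You can confirm the rules were added (illustrative output):
nova secgroup-list-rules default
+-------------+-----------+---------+-----------+--------------+
| IP Protocol | From Port | To Port | IP Range  | Source Group |
+-------------+-----------+---------+-----------+--------------+
| tcp         | 22        | 22      | 0.0.0.0/0 |              |
| icmp        | -1        | -1      | 0.0.0.0/0 |              |
+-------------+-----------+---------+-----------+--------------+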
NOVA ON COMPUTE NODE
Install the Compute packages:
yum -y install openstack-nova-compute
Edit the /etc/nova/nova.conf configuration file:
openstack-config --set /etc/nova/nova.conf database connection mysql://nova:nova4mar@controller/nova
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_host controller
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_user nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_password nova4mar
openstack-config --set /etc/nova/nova.conf DEFAULT rpc_backend qpid
openstack-config --set /etc/nova/nova.conf DEFAULT qpid_hostname controller
#Configure Compute to provide remote console access to instances.
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.216.140
openstack-config --set /etc/nova/nova.conf DEFAULT vnc_enabled True
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_listen 0.0.0.0
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_proxyclient_address 192.168.216.140
openstack-config --set /etc/nova/nova.conf DEFAULT novncproxy_base_url http://controller:6080/vnc_auto.html
#Specify the host that runs the Image Service.
openstack-config --set /etc/nova/nova.conf DEFAULT glance_host controller
#You must determine whether your system's processor and/or hypervisor support hardware acceleration for virtual machines.
Run the following command:
$ egrep -c '(vmx|svm)' /proc/cpuinfo
If this command returns a value of one or greater, your system supports hardware acceleration which typically requires no additional configuration.
If this command returns a value of zero, your system does not support hardware acceleration and you must configure libvirt to use QEMU instead of KVM.
Run the following command:
# openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
Start the Compute service and configure it to start when the system boots:
service libvirtd start
service messagebus start
chkconfig libvirtd on
chkconfig messagebus on
service openstack-nova-compute start
chkconfig openstack-nova-compute on
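From the controller you can now verify that the compute node has registered with the scheduler (illustrative; host names follow this setup):
source /root/admin-openrc.sh
nova service-list
# nova-compute on the host "compute" should be listed with state "up"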
OpenStack – Icehouse –Part 2 Glance
Configure the Image Service On controller Server
yum install openstack-glance python-glanceclient -y
openstack-config --set /etc/glance/glance-api.conf database connection mysql://glance:glance4mar@controller/glance
openstack-config --set /etc/glance/glance-registry.conf database connection mysql://glance:glance4mar@controller/glance
openstack-config --set /etc/glance/glance-api.conf DEFAULT rpc_backend qpid
openstack-config --set /etc/glance/glance-api.conf DEFAULT qpid_hostname controller
mysql -u root -p
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance4mar';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance4mar';
exit
su -s /bin/sh -c "glance-manage db_sync" glance
keystone user-create --name=glance --pass=glance4mar --email=glance@example.com
keystone user-role-add --user=glance --tenant=service --role=admin
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_host controller
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_user glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_password glance4mar
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_host controller
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_user glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_password glance4mar
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
keystone service-create --name=glance --type=image --description="OpenStack Image Service"
keystone endpoint-create --service-id=$(keystone service-list | awk '/ image / {print $2}') --publicurl=http://controller:9292 --internalurl=http://controller:9292 --adminurl=http://controller:9292
service openstack-glance-api start
service openstack-glance-registry start
chkconfig openstack-glance-api on
chkconfig openstack-glance-registry on
#Verify the Image Service installation
mkdir /tmp/images
cd /tmp/images/
wget http://cdn.download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-disk.img
source /root/admin-openrc.sh
glance image-create --name "cirros-0.3.2-x86_64" --disk-format qcow2 --container-format bare --is-public True --progress < cirros-0.3.2-x86_64-disk.img
cd /
rm -rf /tmp/images
glance image-list
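If the upload succeeded, the image shows up as active (illustrative output; the ID will differ):
glance image-list
+--------------------------------------+---------------------+-------------+------------------+----------+--------+
| ID                                   | Name                | Disk Format | Container Format | Size     | Status |
+--------------------------------------+---------------------+-------------+------------------+----------+--------+
| acafc7c0-40aa-4026-9673-b879898e1fc2 | cirros-0.3.2-x86_64 | qcow2       | bare             | 13167616 | active |
+--------------------------------------+---------------------+-------------+------------------+----------+--------+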
Importing Images into Glance
You can load an image from the command line with glance, eg:
glance image-create --name 'Fedora 19 x86_64' --disk-format qcow2 --container-format bare --is-public true \
--copy-from http://cloud.fedoraproject.org/fedora-19.x86_64.qcow2
Building Your Own Images
Alternatively, one can use diskimage-builder, which is available in the RDO repository:
yum install diskimage-builder
$ disk-image-create -a amd64 fedora vm -o fedora-image.qcow2
More Images In Following URL
http://openstack.redhat.com/Image_resources
Tuesday, June 17, 2014
OpenStack - Icehouse --Part 1 Keystone.
The OpenStack project is an open source cloud computing platform that supports all types of cloud environments. The project aims for simple implementation, massive scalability, and a rich set of features. Cloud computing experts from around the world contribute to the project.
OpenStack provides an Infrastructure-as-a-Service (IaaS) solution through a variety of complementary services. Each service offers an application programming interface (API) that facilitates this integration. The following table provides a list of OpenStack services:

| Service | Project | Description |
|---|---|---|
| Dashboard | Horizon | Provides a web-based self-service portal to interact with underlying OpenStack services, such as launching an instance, assigning IP addresses and configuring access controls. |
| Compute | Nova | Manages the lifecycle of compute instances in an OpenStack environment. Responsibilities include spawning, scheduling and decommissioning of virtual machines on demand. |
| Networking | Neutron | Enables network connectivity as a service for other OpenStack services, such as OpenStack Compute. Provides an API for users to define networks and the attachments into them. Has a pluggable architecture that supports many popular networking vendors and technologies. |
| Storage | | |
| Object Storage | Swift | Stores and retrieves arbitrary unstructured data objects via a RESTful, HTTP based API. It is highly fault tolerant with its data replication and scale out architecture. Its implementation is not like a file server with mountable directories. |
| Block Storage | Cinder | Provides persistent block storage to running instances. Its pluggable driver architecture facilitates the creation and management of block storage devices. |
| Shared services | | |
| Identity service | Keystone | Provides an authentication and authorization service for other OpenStack services. Provides a catalog of endpoints for all OpenStack services. |
| Image Service | Glance | Stores and retrieves virtual machine disk images. OpenStack Compute makes use of this during instance provisioning. |
| Telemetry | Ceilometer | Monitors and meters the OpenStack cloud for billing, benchmarking, scalability, and statistical purposes. |
| Higher-level services | | |
| Orchestration | Heat | Orchestrates multiple composite cloud applications by using either the native HOT template format or the AWS CloudFormation template format, through both an OpenStack-native REST API and a CloudFormation-compatible Query API. |
| Database Service | Trove | Provides scalable and reliable Cloud Database-as-a-Service functionality for both relational and non-relational database engines. |

Sample architecture we are trying to set up. The IPs will vary, so please check and adjust accordingly.
ON ALL THE NODE
#Making Selinux to Permissive
sed -i "s/SELINUX=.*/SELINUX=permissive/g" /etc/sysconfig/selinux
yum -y install policycoreutils setroubleshoot
setenforce 0
yum install -y euca2ools
yum install -y yum-plugin-priorities gedit curl wget nc
yum -y install ntp
service ntpd start
chkconfig ntpd on
yum -y install http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/rdo-release-icehouse-3.noarch.rpm
yum -y install http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
yum -y install openstack-utils
yum -y install openstack-selinux
yum -y upgrade
On All node add the following Rules in Iptables
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 5000 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 5672 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 6080 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 8774 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 9292 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 9696 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 35357 -j ACCEPT
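After adding these rules to /etc/sysconfig/iptables, reload the firewall so they take effect (assuming the standard EL6 iptables service):
service iptables restart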
ON OTHER NODES
yum install MySQL-python -y
ON CONTROLLER NODE
yum -y install qpid-cpp-server memcached
sed -i "s/auth=yes/auth=no/g" /etc/qpidd.conf
service qpidd start
chkconfig qpidd on
yum install mysql mysql-server MySQL-python -y
service mysqld start
chkconfig mysqld on
mysql_secure_installation
Overall network layout:
192.168.255.130 controller
192.168.216.130 controller
192.168.216.140 compute
192.168.216.141 compute
192.168.255.150 network
eth4 network << public connection
192.168.216.150 network
192.168.216.151 network
ON NETWORK NODE
One of the external interfaces uses a special configuration without an IP address assigned to it. Configure the third interface as the external interface:
Replace INTERFACE_NAME with the actual interface name. For example, eth2 or ens256.
Edit the /etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME file to contain the following:
Do not change the HWADDR and UUID keys.
DEVICE=INTERFACE_NAME
TYPE=Ethernet
ONBOOT="yes"
BOOTPROTO="none"
Restart networking:
service network restart
=========================================
KEYSTONE_DBPASS keystone4mar Database password of Identity service
DEMO_PASS demo4mar Password of user demo
ADMIN_PASS admin4mar Password of user admin
GLANCE_DBPASS glance4mar Database password for Image Service
GLANCE_PASS glance4mar Password of Image Service user glance
NOVA_DBPASS nova4mar Database password for Compute service
NOVA_PASS nova4mar Password of Compute service user nova
DASH_DBPASS dash4mar Database password for the dashboard
CINDER_DBPASS cinder4mar Database password for the Block Storage service
CINDER_PASS cinder4mar Password of Block Storage service user cinder
NEUTRON_DBPASS neutron4mar Database password for the Networking service
NEUTRON_PASS neutron4mar Password of Networking service user neutron
HEAT_DBPASS heat4mar Database password for the Orchestration service
HEAT_PASS heat4mar Password of Orchestration service user heat
CEILOMETER_DBPASS ceil4mar Database password for the Telemetry service
CEILOMETER_PASS ceil4mar Password of Telemetry service user ceilometer
TROVE_DBPASS trove4mar Database password of Database service
TROVE_PASS trove4mar Password of Database Service user trove
=========================================
On the controller node, edit my.cnf and configure it for InnoDB:
bind-address = ***.***.***.***
default-storage-engine = innodb
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
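After editing my.cnf, restart MySQL so the settings take effect:
service mysqld restart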
Installing the Identity Service on the controller node:
yum install openstack-keystone python-keystoneclient -y
openstack-config --set /etc/keystone/keystone.conf database connection mysql://keystone:keystone4mar@controller/keystone
$ mysql -u root -p
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone4mar';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone4mar';
exit
Create the database tables for the Identity Service:
su -s /bin/sh -c "keystone-manage db_sync" keystone
ADMIN_TOKEN=$(openssl rand -hex 10)
echo $ADMIN_TOKEN
openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token $ADMIN_TOKEN
keystone-manage pki_setup --keystone-user keystone --keystone-group keystone
chown -R keystone:keystone /etc/keystone/ssl
chmod -R o-rwx /etc/keystone/ssl
service openstack-keystone start
chkconfig openstack-keystone on
#Define users, tenants, and roles
# Replace ADMIN_TOKEN with your authorization token; here we reuse the shell variable set above.
export OS_SERVICE_TOKEN=$ADMIN_TOKEN
export OS_SERVICE_ENDPOINT=http://controller:35357/v2.0
#Create an administrative user
keystone user-create --name=admin --pass=admin4mar --email=ADMIN_EMAIL
keystone role-create --name=admin
keystone tenant-create --name=admin --description="Admin Tenant"
keystone user-role-add --user=admin --tenant=admin --role=admin
keystone user-role-add --user=admin --role=_member_ --tenant=admin
#Create a normal user
keystone user-create --name=demo --pass=demo4mar --email=DEMO_EMAIL
keystone tenant-create --name=demo --description="Demo Tenant"
keystone user-role-add --user=demo --role=_member_ --tenant=demo
#Create a service tenant
keystone tenant-create --name=service --description="Service Tenant"
#Define services and API endpoints
keystone service-create --name=keystone --type=identity --description="OpenStack Identity"
keystone endpoint-create --service-id=$(keystone service-list | awk '/ identity / {print $2}') --publicurl=http://controller:5000/v2.0 --internalurl=http://controller:5000/v2.0 --adminurl=http://controller:35357/v2.0
#Verify the Identity Service installation
unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
keystone --os-username=admin --os-password=admin4mar --os-auth-url=http://controller:35357/v2.0 token-get
keystone --os-username=admin --os-password=admin4mar --os-tenant-name=admin --os-auth-url=http://controller:35357/v2.0 token-get
echo "export OS_USERNAME=admin
export OS_PASSWORD=admin4mar
export OS_TENANT_NAME=admin
export OS_AUTH_URL=http://controller:35357/v2.0" >> /root/admin-openrc.sh
cat /root/admin-openrc.sh
source /root/admin-openrc.sh
keystone token-get
keystone user-list
keystone user-role-list --user admin --tenant admin
yum install python-pip
pip install python-PROJECTclient
ceilometer - Telemetry API
cinder - Block Storage API and extensions
glance - Image Service API
heat - Orchestration API
keystone - Identity service API and extensions
neutron - Networking API
nova - Compute API and extensions
swift - Object Storage API
trove - Database Service API
#On Red Hat Enterprise Linux, CentOS, or Fedora, use yum to install the clients from the packaged versions available in RDO:
yum install python-PROJECTclient
=====================
echo "export OS_USERNAME=demo
export OS_PASSWORD=demo4mar
export OS_TENANT_NAME=demo
export OS_AUTH_URL=http://controller:35357/v2.0" >> /root/demo-openrc.sh
cat /root/demo-openrc.sh
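A quick check that the demo credentials work as well (this should return a token without errors):
source /root/demo-openrc.sh
keystone token-get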
OpenStack provides an Infrastructure-as-a-Service (IaaS) solution through a variety of complemental services. Each service offers an application programming interface (API) that facilitates this integration. The following table provides a list of OpenStack services:
Dashboard | Horizon | Provides a web-based self-service portal to interact with underlying OpenStack services, such as launching an instance, assigning IP addresses and configuring access controls. |
Compute | Nova | Manages the lifecycle of compute instances in an OpenStack environment. Responsibilities include spawning, scheduling and decommissioning of virtual machines on demand. |
Networking | Neutron | Enables network connectivity as a service for other OpenStack services, such as OpenStack Compute. Provides an API for users to define networks and the attachments into them. Has a pluggable architecture that supports many popular networking vendors and technologies. |
Storage | ||
---|---|---|
Object Storage | Swift | Stores and retrieves arbitrary unstructured data objects via a RESTful, HTTP based API. It is highly fault tolerant with its data replication and scale out architecture. Its implementation is not like a file server with mountable directories. |
Block Storage | Cinder | Provides persistent block storage to running instances. Its pluggable driver architecture facilitates the creation and management of block storage devices. |
Shared services | ||
Identity service | Keystone | Provides an authentication and authorization service for other OpenStack services. Provides a catalog of endpoints for all OpenStack services. |
Image Service | Glance | Stores and retrieves virtual machine disk images. OpenStack Compute makes use of this during instance provisioning. |
Telemetry | Ceilometer | Monitors and meters the OpenStack cloud for billing, benchmarking, scalability, and statistical purposes. |
Higher-level services | ||
Orchestration | Heat | Orchestrates multiple composite cloud applications by using either the native HOT template format or the AWS CloudFormation template format, through both an OpenStack-native REST API and a CloudFormation-compatible Query API. |
Database Service | Trove | Provides scalable and reliable Cloud Database-as-a-Service functionality for both relational and non-relational database engines. |
Sample Architecture We are trying to Set up. The Ip's will Vary , Please do check and clear ..
ON ALL THE NODE
#Making Selinux to Permissive
sed -i "s/SELINUX=.*/SELINUX=permissive/g" /etc/sysconfig/selinux
yum -y install policycoreutils setroubleshoot
setenforce 0
yum install -y euca2ools
yum install -y yum-plugin-priorities gedit curl wget nc
yum -y install ntp
service ntpd start
chkconfig ntpd on
yum -y install http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/rdo-release-icehouse-3.noarch.rpm
yum -y install http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
yum -y install openstack-utils
yum -y install openstack-selinux
yum -y upgrade
On All node add the following Rules in Iptables
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 5000 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 5672 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 6080 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 8774 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 9292 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 9696 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 35357 -j ACCEPT
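One way to make these rules persistent is to add them to /etc/sysconfig/iptables just above the final catch-all REJECT rule and reload the service. A minimal sketch, assuming the stock iptables service on RHEL/CentOS 6:
# Back up the current rule set before editing
cp /etc/sysconfig/iptables /etc/sysconfig/iptables.bak
# Paste the ACCEPT rules above into /etc/sysconfig/iptables (above the "-A INPUT -j REJECT ..." line), then reload
service iptables restart
# Quick check that a few of the OpenStack ports are now accepted
iptables -L -n | grep -E '5000|9292|35357'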
ON THE OTHER NODES
yum install MySQL-python -y
ON CONTROLLER NODE
yum -y install qpid-cpp-server memcached
sed -i "s/auth=yes/auth=no/g" /etc/qpidd.conf
service qpidd start
chkconfig qpidd on
yum install mysql mysql-server MySQL-python -y
service mysqld start
chkconfig mysqld on
mysql_secure_installation
OVERALL NETWORK LAYOUT
192.168.255.130 controller
192.168.216.130 controller
192.168.216.140 compute
192.168.216.141 compute
192.168.255.150 network
eth4 network << public connection
192.168.216.150 network
192.168.216.151 network
On NETWORK NODE
The external interface uses a special configuration without an IP address assigned to it. Configure the third interface as the external interface:
Replace INTERFACE_NAME with the actual interface name. For example, eth2 or ens256.
Edit the /etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME file to contain the following:
Do not change the HWADDR and UUID keys.
DEVICE=INTERFACE_NAME
TYPE=Ethernet
ONBOOT="yes"
BOOTPROTO="none"
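For example, assuming the external interface is eth2, /etc/sysconfig/network-scripts/ifcfg-eth2 would end up containing the following (with the original HWADDR and UUID lines left in place):
DEVICE=eth2
TYPE=Ethernet
ONBOOT="yes"
BOOTPROTO="none"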
Restart networking:
service network restart
=========================================
Variable | Value | Description
---|---|---
KEYSTONE_DBPASS | keystone4mar | Database password of Identity service
DEMO_PASS | demo4mar | Password of user demo
ADMIN_PASS | admin4mar | Password of user admin
GLANCE_DBPASS | glance4mar | Database password for Image Service
GLANCE_PASS | glance4mar | Password of Image Service user glance
NOVA_DBPASS | nova4mar | Database password for Compute service
NOVA_PASS | nova4mar | Password of Compute service user nova
DASH_DBPASS | dash4mar | Database password for the dashboard
CINDER_DBPASS | cinder4mar | Database password for the Block Storage service
CINDER_PASS | cinder4mar | Password of Block Storage service user cinder
NEUTRON_DBPASS | neutron4mar | Database password for the Networking service
NEUTRON_PASS | neutron4mar | Password of Networking service user neutron
HEAT_DBPASS | heat4mar | Database password for the Orchestration service
HEAT_PASS | heat4mar | Password of Orchestration service user heat
CEILOMETER_DBPASS | ceil4mar | Database password for the Telemetry service
CEILOMETER_PASS | ceil4mar | Password of Telemetry service user ceilometer
TROVE_DBPASS | trove4mar | Database password of Database service
TROVE_PASS | trove4mar | Password of Database Service user trove
=========================================
On Controller Node
In /etc/my.cnf, configure the InnoDB defaults under the [mysqld] section:
bind-address = ***.***.***.***
default-storage-engine = innodb
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
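Put together, the relevant part of /etc/my.cnf looks roughly like this (a sketch that assumes the controller's management address 192.168.216.130 from the network plan above):
[mysqld]
bind-address = 192.168.216.130
default-storage-engine = innodb
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
Then restart MySQL so the changes take effect:
service mysqld restart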
Installing Identity Service On Controller Node
yum install openstack-keystone python-keystoneclient -y
openstack-config --set /etc/keystone/keystone.conf database connection mysql://keystone:keystone4mar@controller/keystone
$ mysql -u root -p
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone4mar';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone4mar';
exit
Create the database tables for the Identity Service:
su -s /bin/sh -c "keystone-manage db_sync" keystone
ADMIN_TOKEN=$(openssl rand -hex 10)
echo $ADMIN_TOKEN
openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token $ADMIN_TOKEN
keystone-manage pki_setup --keystone-user keystone --keystone-group keystone
chown -R keystone:keystone /etc/keystone/ssl
chmod -R o-rwx /etc/keystone/ssl
service openstack-keystone start
chkconfig openstack-keystone on
#Define users, tenants, and roles
# Use the ADMIN_TOKEN generated above as the service token
export OS_SERVICE_TOKEN=$ADMIN_TOKEN
export OS_SERVICE_ENDPOINT=http://controller:35357/v2.0
#Create an administrative user
keystone user-create --name=admin --pass=admin4mar --email=ADMIN_EMAIL
keystone role-create --name=admin
keystone tenant-create --name=admin --description="Admin Tenant"
keystone user-role-add --user=admin --tenant=admin --role=admin
keystone user-role-add --user=admin --role=_member_ --tenant=admin
#Create a normal user
keystone user-create --name=demo --pass=demo4mar --email=DEMO_EMAIL
keystone tenant-create --name=demo --description="Demo Tenant"
keystone user-role-add --user=demo --role=_member_ --tenant=demo
#Create a service tenant
keystone tenant-create --name=service --description="Service Tenant"
#Define services and API endpoints
keystone service-create --name=keystone --type=identity --description="OpenStack Identity"
keystone endpoint-create --service-id=$(keystone service-list | awk '/ identity / {print $2}') --publicurl=http://controller:5000/v2.0 --internalurl=http://controller:5000/v2.0 --adminurl=http://controller:35357/v2.0
#Verify the Identity Service installation
unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
keystone --os-username=admin --os-password=admin4mar --os-auth-url=http://controller:35357/v2.0 token-get
keystone --os-username=admin --os-password=admin4mar --os-tenant-name=admin --os-auth-url=http://controller:35357/v2.0 token-get
echo "export OS_USERNAME=admin
export OS_PASSWORD=admin4mar
export OS_TENANT_NAME=admin
export OS_AUTH_URL=http://controller:35357/v2.0" >> /root/admin-openrc.sh
cat /root/admin-openrc.sh
source /root/admin-openrc.sh
keystone token-get
keystone user-list
keystone user-role-list --user admin --tenant admin
IF WE WANT TO INSTALL THE CLIENTS
yum install python-pip
pip install python-PROJECTclient
ceilometer - Telemetry API
cinder - Block Storage API and extensions
glance - Image Service API
heat - Orchestration API
keystone - Identity service API and extensions
neutron - Networking API
nova - Compute API and extensions
swift - Object Storage API
trove - Database Service API
#On Red Hat Enterprise Linux, CentOS, or Fedora, use yum to install the clients from the packaged versions available in RDO:
yum install python-PROJECTclient
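For example, to pull a few of the clients used in this setup from the RDO/EPEL repositories configured earlier (the package names follow the python-PROJECTclient pattern):
yum install -y python-keystoneclient python-novaclient python-glanceclient python-neutronclient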
Creating a client Profile file
=====================
echo "export OS_USERNAME=demo
export OS_PASSWORD=demo4mar
export OS_TENANT_NAME=demo
export OS_AUTH_URL=http://controller:35357/v2.0" >> /root/demo-openrc.sh
cat /root/demo-openrc.sh
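The demo credentials can be verified the same way as the admin ones:
source /root/demo-openrc.sh
keystone token-get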
Wednesday, June 11, 2014
Putty + Remote tunnel + RDP
Installing Putty and Configuring SSH Tunnel and Remote Desktop
On the CLIENT computer we are connecting from, we will need to install Putty and configure it to connect RDP over SSH (i.e., create the tunnel).
1. To install Putty, just extract the Zip file to your C:\Putty folder. The Putty folder should contain several .exe programs.
2. To run putty, we will just run the Putty.exe in the C:\Putty folder. To make it easier to launch, you can create a shortcut to Putty.exe and put it on your desktop or in your Start Menu.
3. Under the Session section (on the left pane), type in the host name or IP of the PC we are connecting to (10.0.1.5 in our example on the local network) and leave the port at 22. You can also enter a name under the Saved Sessions box to save the profile for easy connection later (more on this below).
Under the Connection > SSH > Tunnels tab, under Source Port, enter a local port to use for our tunnel (I use a high port in the 40000 range; we'll use 40000). In the Destination box, put the address of the remote computer running Copssh/SSH plus the RDP port, 10.0.1.5:3389 in my example (3389 is the default RDP port), and click Add.
Go back to the Sessions section and click the Save button under the Saved Sessions box and then hit the Open button.
4. You should get a prompt to accept a key the first time we connect, click Yes.
5. We should now get a command-window-like interface asking for a username. Enter your remote computer's login username and password. Once you connect, the command window will show a shell prompt on the remote machine.
Connecting via Remote Desktop over the SSH Tunnel
1. On the laptop/client computer, open Remote Desktop Connection (Start Menu > All Programs > Accessories > Remote Desktop Connection)
2. Enter 127.0.0.1:40000 as the computer to connect to.
127.0.0.1 is the local loopback address and 40000 is the local port we chose. This forces the Remote Desktop client to go through the SSH tunnel listening on port 40000, which carries the RDP traffic over SSH (port 22) to the remote PC.
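For reference, the same tunnel can be built from a command line with an OpenSSH client instead of Putty. A minimal sketch, where "user" is a placeholder for the remote login name:
# Listen on local port 40000 and forward it over SSH to the remote machine's RDP port (3389)
ssh -N -L 40000:localhost:3389 user@10.0.1.5
Remote Desktop then connects to 127.0.0.1:40000 exactly as described above.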
Wednesday, June 4, 2014
Mail Server-Postfix+Centos+Dovecot
Set up a mail server on CentOS 6
» Installing and configuring postfix
» Installing and configuring dovecot
» Creating users and testing
» Installing and configuring squirrelmail
» Installing and configuring postfix
Step 1 » Before installation, assign a static IP and add a host entry for your domain pointing to that IP in the /etc/hosts file, like below.
echo "23.236.147.74 keralainindia.asia" >> /etc/hosts
Step 2 » install postfix
[root@kerala ~]# yum -y install postfix
Step 3 » install SMTP AUTH packages .
[root@kerala ~]# yum -y install cyrus-sasl cyrus-sasl-devel cyrus-sasl-gssapi cyrus-sasl-md5 cyrus-sasl-plain
Postfix package installation is completed .
Step 4 » Creating an SSL cert.
[root@kerala ~]# mkdir /etc/postfix/ssl
[root@kerala ~]# cd /etc/postfix/ssl/
[root@kerala ssl]# openssl genrsa -des3 -rand /etc/hosts -out smtpd.key 1024
[root@kerala ssl]# chmod 600 smtpd.key
[root@kerala ssl]# openssl req -new -key smtpd.key -out smtpd.csr
[root@kerala ssl]# openssl x509 -req -days 365 -in smtpd.csr -signkey smtpd.key -out smtpd.crt
[root@kerala ssl]# openssl rsa -in smtpd.key -out smtpd.key.unencrypted
[root@kerala ssl]# mv -f smtpd.key.unencrypted smtpd.key
[root@kerala ssl]# openssl req -new -x509 -extensions v3_ca -keyout cakey.pem -out cacert.pem -days 365
Step 5 » Now open the /etc/postfix/main.cf file.
Find and comment out the lines below.
#inet_interfaces = localhost #---> line no 116
#mydestination = $myhostname, localhost.$mydomain, localhost #--> line no 164
and add these lines at the bottom of the file.
myhostname = mail.keralainindia.asia
mydomain = keralainindia.asia
myorigin = $mydomain
home_mailbox = mail/
mynetworks = 127.0.0.0/8
inet_interfaces = all
mydestination = $myhostname, localhost.$mydomain, localhost, $mydomain
smtpd_sasl_auth_enable = yes
smtpd_sasl_type = cyrus
smtpd_sasl_security_options = noanonymous
broken_sasl_auth_clients = yes
smtpd_sasl_authenticated_header = yes
smtpd_recipient_restrictions = permit_sasl_authenticated,permit_mynetworks,reject_unauth_destination
smtpd_tls_auth_only = no
smtp_use_tls = yes
smtpd_use_tls = yes
smtp_tls_note_starttls_offer = yes
smtpd_tls_key_file = /etc/postfix/ssl/smtpd.key
smtpd_tls_cert_file = /etc/postfix/ssl/smtpd.crt
smtpd_tls_CAfile = /etc/postfix/ssl/cacert.pem
smtpd_tls_received_header = yes
smtpd_tls_session_cache_timeout = 3600s
tls_random_source = dev:/dev/urandom
Step 6 » Now open the /etc/postfix/master.cf file and add the lines below after the smtp entry:
smtps inet n - n - - smtpd
-o smtpd_sasl_auth_enable=yes
-o smtpd_reject_unlisted_sender=yes
-o smtpd_recipient_restrictions=permit_sasl_authenticated,reject
-o broken_sasl_auth_clients=yes
Step 7 » Now start the postfix and saslauthd services:
[root@kerala ~]# service postfix start
[root@kerala ~]# service saslauthd start
» Issue the below commands to start the postfix and saslauthd at startup
[root@kerala ~]# chkconfig --level 235 postfix on
[root@kerala ~]# chkconfig --level 235 saslauthd on
Step 8 » Now check your SMTP connectivity: telnet to localhost on port 25 and type the command ehlo localhost.
[root@kerala ~]# telnet localhost 25
Trying ::1...
Connected to localhost.
Escape character is '^]'.
220 mail.keralainindia.asia ESMTP Postfix
ehlo localhost <---- type this command
250-mail.keralainindia.asia
250-PIPELINING
250-SIZE 10240000
250-VRFY
250-ETRN
250-STARTTLS
250-AUTH PLAIN LOGIN
250-AUTH=PLAIN LOGIN
250-ENHANCEDSTATUSCODES
250-8BITMIME
250 DSN
quit
221 2.0.0 Bye
Connection closed by foreign host.
If you get this output .. Great .. everything is fine till now.
» Installing and configuring dovecot
Step 9 » Issue this command to install dovecot:
[root@kerala ~]# yum -y install dovecot
Step 10 » After installation, open the /etc/dovecot/dovecot.conf file and add the lines below at the end of the file. Please make sure mail_location here and home_mailbox in the Postfix configuration use the same directory name.
protocols = imap pop3
mail_location = maildir:~/mail
pop3_uidl_format = %08Xu%08Xv
Step 11 » Now start the dovecot service:
[root@kerala ~]# service dovecot start
» Issue the below command to start the dovecot at startup
[root@kerala ~]# chkconfig --level 235 dovecot on
Step 12 » Now test your POP3 connectivity:
[root@kerala ~]# telnet localhost 110
Trying ::1...
Connected to localhost.
Escape character is '^]'.
+OK Dovecot ready.
quit
+OK Logging out
Connection closed by foreign host.
Yes .. your server is ready to receive mails .
» Creating users and testing
Step 13 » Now create a user to test your configuration:
[root@kerala ~]# useradd -m ramraj -s /sbin/nologin
and set a password for the user created:
[root@kerala ~]# passwd ramraj
Step 14 » Now you can send and receive mail using this server. In case of any issues, check the log file /var/log/maillog.
Your mail server is ready.
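A quick way to test delivery from the shell is with mailx. A minimal sketch, assuming the mailx package and the ramraj user created above:
yum -y install mailx
echo "This is a test message" | mail -s "Test mail" ramraj@keralainindia.asia
# With home_mailbox = mail/ the message lands under /home/ramraj/mail/new/ ; watch /var/log/maillog for the delivery status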
» Installing and configuring squirrelmail
Step 15 » You need to add the EPEL repository to install the squirrelmail package. You can find the latest EPEL repository RPM here ( http://ftp.jaist.ac.jp/pub/Linux/Fedora/epel/6/i386/repoview/epel-release.html )
[root@kerala ~]# rpm -ivh http://ftp.jaist.ac.jp/pub/Linux/Fedora/epel/6/i386/epel-release-6-8.noarch.rpm
Step 16 » Issue the command below to install squirrelmail.
[root@kerala ~]# yum install squirrelmail
This command will install squirrelmail along with apache and php.
Step 17 » Now run the command below to configure squirrelmail.
[root@kerala ~]# perl /usr/share/squirrelmail/config/conf.pl
»»» 1 »»» 1 »»» krizna (type Organization name ) »»» R ( return )
»»» 2 »»» 1 »»» ( hit space for empty Domain name ) and choose 3 »»» SMTP ( choose SMTP ) »»» R ( return )
»»» D »»» dovecot ( type ) »»» press enter with default
»»» s ( save and quit)
Step 18 » Open the /etc/httpd/conf.d/squirrelmail.conf file and uncomment the lines below if you are using SSL. If you are not using SSL, leave them commented.
# RewriteCond %{HTTPS} !=on
# RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI}
Step 19 » Start the Apache service.
[root@kerala ~]# service httpd start
» Issue the below commands to start the httpd at startup
[root@kerala ~]# chkconfig --level 235 httpd on
Step 20 » Now open http://serverip/webmail in your browser.