# machine_prep_notes.sv.txt
# Demo environment consists of three linux VMs: 
#     - data acquisition (DAQ), data storage (DS), sge cluster (HPC) nodes
#     - CentOS 6.6, 64-bit
#     - no shared storage
#     - DS node runs PostgreSQL database server, Web Portal, DS Web Service, 
#       CAT Web Service, MongoDB server
#     - DAQ node runs DAQ Web Service
#     - HPC node runs SGE cluster

# Machine Preparation
# ===================

# install build and runtime dependencies (all machines)
# (compilers, zlib/openssl/openldap/readline headers, svn client, and the
# usual autotools/build utilities needed to build the support area from source)
yum install -y gcc libgcc expect zlib-devel openssl-devel openldap-devel subversion make sed gawk autoconf automake wget readline-devel

# Download the Globus RPM repo and install the GridFTP server
# (DAQ and DS machines; "both" here excludes the HPC node)
# repo RPM: http://toolkit.globus.org/ftppub/gt6/installers/repo/globus-toolkit-repo-latest.noarch.rpm
# -y keeps the install non-interactive, matching the dependency install above
yum install -y globus-gridftp

# Disable requiretty in /etc/sudoers (the sudoers option is "Defaults requiretty")

# Prepare gridftp server to use sshd (dmstorage machine)
# NOTE(review): presumably this configures GridFTP sessions to run over ssh
# (sshftp) instead of a standalone listener -- confirm against Globus docs
globus-gridftp-server-enable-sshftp

# create system (dm) account on both machines, configure ssh-keys and 
# authorized_keys files

# create several user accounts (dmstorage machine): dmuser1, dmuser2, dmuser3

# build and install epics base and SDDS/SDDSepics extensions under 
# /opt/epics (dmstorage machine)
# build SDDS python under /opt/epics/extensions/src/SDDS/python/
# copy sdds.py into /opt/DM/support/python/linux-x86_64/lib/python2.7/
# copy /opt/epics/extensions/src/SDDS/python/O.linux-x86_64/sddsdatamodule.so
# into /opt/DM/support/python/linux-x86_64/lib/python2.7/lib-dynload/

# export /opt/DM to dmhpc node
# yum install nfs-utils
# edit /etc/exports and add /opt/DM 192.168.100.8(rw,sync)
# exportfs -a
# restart nfs

# install sge on hpc machine, add dmstorage as submission node, 
# copy /opt/sge to dmstorage

# configure /opt/DM area for software installation
# (owned by the dm service account; 755 so other local users can run the tools)
mkdir -p /opt/DM
# use the POSIX user:group separator -- the legacy "dm.dm" form is a
# GNU-only extension and is ambiguous for usernames that contain a dot
chown -R dm:dm /opt/DM
chmod 755 /opt/DM

# configure (or disable) firewall (both machines)
# NOTE(review): stopping iptables outright opens all ports; a scoped rule set
# allowing just the service ports used below (8181, 22236, 33336) would be
# safer -- confirm site policy before disabling entirely
/etc/init.d/iptables stop

# DM Deployment: DS Machine
# =========================

# Log into dmstorage node and create local DM deployment directory
# in dm user home area (/opt/DM was created and chown'd to dm above)
cd /opt/DM
ls -l

# Checkout code as release 0.2
# NOTE(review): this URL checks out trunk (into a working copy named "dev"),
# not a 0.2 release tag -- confirm trunk matches release 0.2 at deploy time
svn co https://subversion.xray.aps.anl.gov/DataManagement/trunk dev

# Build support area
# (full build for the storage node; the DAQ node uses the lighter
# "make support-daq" target instead -- see the DAQ section below)
cd dev
make support

# Source setup (puts the DM commands from this checkout on the PATH)
source setup.sh

# Create db
# NOTE(review): presumably initializes the PostgreSQL DM schema on this
# node (DS runs the PostgreSQL server per the header notes) -- confirm
make db

# Configure Web Portal
# Note:
#   - this needs to be done only during the first portal deployment,
#     or after portal has been unconfigured explicitly
#   - this step configures DB access
#   - adds initial DM system user to the DB
#   - per the first note, do NOT re-run against an already-configured portal
make configure-web-portal

# Add a few users (examples; uncomment to run)
#dm-add-user --username dmuser1 --first-name Test --last-name User1
#dm-add-user --username dmuser2 --first-name Test --last-name User2
#dm-add-user --username dmuser3 --first-name Test --last-name User3

# Deploy Web Portal
# Note:
#   - deploys portal war file into glassfish
#   - after this step, users can access portal at
#     https://dmstorage.svdev.net:8181/dm
#     (8181 is presumably the glassfish secure listener -- verify in its config)
make deploy-web-portal

# Deploy DS Web Service
# Note:
#   - generates SSL certificates and configuration files
#   - after this step, DS web service is accessible at port 22236
#   - log files are under DM/var/log
#   - configuration files are under DM/etc
#   - user setup file is DM/etc/dm.setup.sh
#   - service control script is under DM/dm-0.2/etc/init.d
#   - (DM/... paths above are presumably relative to /opt -- i.e. /opt/DM)
make deploy-ds-web-service

# Check functionality. Open second terminal and log into dmstorage node
# as user sveseli
# Source setup file to get access to DM commands
# (this is the user setup file written by deploy-ds-web-service above)
source /opt/DM/etc/dm.setup.sh

# Get user list as administrator (dm) account
# (should include the initial DM system user added by configure-web-portal)
dm-get-users

# DM Deployment: DAQ Machine/HPC Machine
# ======================================

# Log into dmdaq node and create local DM deployment directory
# in dm user home area (same /opt/DM layout as on the storage node)
cd /opt/DM
ls -l

# Checkout code as release 0.2 (same repository URL as the storage node;
# see the trunk-vs-release note in the DS section)
svn co https://subversion.xray.aps.anl.gov/DataManagement/trunk dev

# Build support area
# Note the following:
#   - since demo machines are identical, we could simply copy support/dm code
#     from the storage node; this is not necessarily the case in general
#   - support area and DM code distribution can be shared between DAQ and DS
#     nodes
#   - support area on the daq node is much lighter (i.e., no need
#     for glassfish, etc.) -- hence "support-daq" instead of "support"
cd dev
make support-daq

# Source setup (same as on the storage node: puts DM commands on the PATH)
source setup.sh

# Deploy DAQ Web Service
# Note:
#   - requires storage node to be installed
#   - generates SSL certificates and configuration files
#   - after this step, DAQ web service is accessible at port 33336
#   - log files are under DM/var/log
#   - configuration files are under DM/etc
#   - user setup file is DM/etc/dm.setup.sh
#   - (unlike the DS service, no init.d control script is listed here --
#     NOTE(review): check DM/dm-0.2/etc/init.d on the DAQ node)
make deploy-daq-web-service