# Demo environment consists of three Linux VMs:
# - data acquisition (DAQ), data storage (DS), and SGE cluster (HPC) nodes
# - CentOS 6.6, 64-bit
# - no shared storage
# - DS node runs PostgreSQL database server, Web Portal, DS Web Service,
# CAT Web Service, MongoDB server
# - DAQ node runs DAQ Web Service
# - HPC node runs SGE cluster
# Machine Preparation
# ===================
# install dependencies (all machines)
yum install -y gcc libgcc expect zlib-devel openssl-devel openldap-devel subversion make sed gawk autoconf automake wget readline-devel
# Download globus RPM repo and install gridftp (both machines)
# http://toolkit.globus.org/ftppub/gt6/installers/repo/globus-toolkit-repo-latest.noarch.rpm
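# for example, install the repo RPM above first, then the gridftp packages
# (exact package names may vary by Globus Toolkit version):
# yum install -y http://toolkit.globus.org/ftppub/gt6/installers/repo/globus-toolkit-repo-latest.noarch.rpm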
yum install globus-gridftp
# Disable requiretty in /etc/sudoers
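# for example (assumes the stock "Defaults    requiretty" entry is present):
# sed -i '/^Defaults.*requiretty/s/^/#/' /etc/sudoers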
# Prepare gridftp server to use sshd (dmstorage machine)
globus-gridftp-server-enable-sshftp
# create system (dm) account on both machines, configure ssh-keys and
# authorized_keys files
# create several user accounts (dmstorage machine): dmuser1, dmuser2, dmuser3
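# illustrative commands only; account setup and key distribution are site-specific:
# useradd dm
# su - dm -c "ssh-keygen -t rsa"   # then append the public key to authorized_keys on the other nodes
# for u in dmuser1 dmuser2 dmuser3; do useradd $u; done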
# build and install epics base and SDDS/SDDSepics extensions under
# /opt/epics (dmstorage machine)
# build SDDS python under /opt/epics/extensions/src/SDDS/python/
# copy sdds.py into /opt/DM/support/python/linux-x86_64/lib/python2.7/
# copy /opt/epics/extensions/src/SDDS/python/O.linux-x86_64/sddsdatamodule.so
# into /opt/DM/support/python/linux-x86_64/lib/python2.7/lib-dynload/
# export /opt/DM to dmhpc node
# yum install nfs-utils
# edit /etc/exports and add /opt/DM 192.168.100.8(rw,sync)
# exportfs -a
# restart nfs
# install sge on hpc machine, add dmstorage as submission node,
# copy /opt/sge to dmstorage
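# for example (SGE admin commands, run on the dmhpc node; paths assumed):
# qconf -as dmstorage              # register dmstorage as an SGE submit host
# scp -r /opt/sge dm@dmstorage:/opt/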
# configure /opt/DM area for software installation
mkdir -p /opt/DM
chown -R dm.dm /opt/DM
chmod 755 /opt/DM
# configure (or disable) firewall (both machines)
/etc/init.d/iptables stop
# DM Deployment: DS Machine
# =========================
# Log into dmstorage node and create local DM deployment directory
# in dm user home area
cd /opt/DM
ls -l
# Checkout code as release 0.2
svn co https://subversion.xray.aps.anl.gov/DataManagement/trunk dev
# Build support area
cd dev
make support
# Source setup
source setup.sh
# Create db
make db
# Configure Web Portal
# Note:
# - this needs to be done only during the first portal deployment,
# or after portal has been unconfigured explicitly
# - this step configures DB access
# - adds initial DM system user to the DB
make configure-web-portal
# Add a few users
#dm-add-user --username dmuser1 --first-name Test --last-name User1
#dm-add-user --username dmuser2 --first-name Test --last-name User2
#dm-add-user --username dmuser3 --first-name Test --last-name User3
# Deploy Web Portal
# Note:
# - deploys portal war file into glassfish
# - after this step, users can access portal at
# https://dmstorage.svdev.net:8181/dm
make deploy-web-portal
# Deploy DS Web Service
# Note:
# - generates SSL certificates and configuration files
# - after this step, DS web service is accessible at port 22236
# - log files are under DM/var/log
# - configuration files are under DM/etc
# - user setup file is DM/etc/dm.setup.sh
# - service control script is under DM/dm-0.2/etc/init.d
make deploy-ds-web-service
# Check functionality. Open second terminal and log into dmstorage node
# as user sveseli
# Source setup file to get access to DM commands
source /opt/DM/etc/dm.setup.sh
# Get user list as administrator (dm) account
dm-get-users
# DM Deployment: DAQ Machine/HPC Machine
# ======================================
# Log into dmdaq node and create local DM deployment directory
# in dm user home area
cd /opt/DM
ls -l
# Checkout code as release 0.2
svn co https://subversion.xray.aps.anl.gov/DataManagement/trunk dev
# Build support area
# Note the following:
# - since demo machines are identical, we could simply copy support/dm code
# from the storage node; this is not necessarily the case in general
# - support area and DM code distribution can be shared between DAQ and DS
# nodes
# - support area on the daq node is much lighter (i.e., no need
# for glassfish, etc.)
cd dev
make support-daq
# Source setup
source setup.sh
# Deploy DAQ Web Service
# Note:
# - requires storage node to be installed
# - generates SSL certificates and configuration files
# - after this step, DAQ web service is accessible at port 33336
# - log files are under DM/var/log
# - configuration files are under DM/etc
# - user setup file is DM/etc/dm.setup.sh
make deploy-daq-web-service
# Need 1 terminal on DAQ node, 1 terminal on HPC node and 2 terminals on DS
# node
#####################################################################
# Prepare ahead of time
# ssh sveseli@dmstorage: add experiment e1, mm2
source /opt/DM/etc/dm.setup.sh
dm-add-experiment --name e1 --type-id 1
dm-add-experiment --name mm2 --type-id 1
# ssh dm@dmstorage: add a few users
source /opt/DM/etc/dm.setup.sh
dm-add-user --username dmuser1 --first-name Test --last-name User1
dm-add-user --username dmuser2 --first-name Test --last-name User2
dm-add-user --username dmuser3 --first-name Test --last-name User3
#####################################################################
# Initialize demo
# ssh -X dm@dmstorage: start services
cd /opt/DM/dev
source setup.sh
./etc/init.d/dm-postgresql start
./etc/init.d/dm-glassfish start
./etc/init.d/dm-mongodb start
./etc/init.d/dm-ds-web-service start
./etc/init.d/dm-cat-web-service start
# ssh -X dm@dmdaq: start services
cd /opt/DM/dev
source setup.sh
./etc/init.d/dm-daq-web-service start
# ssh -X dm@dmhpc: start services, check NFS, check SGE
cd /opt/DM/dev
source setup.sh
./etc/init.d/dm-daq-web-service start
ls -l /net/dmstorage/opt/DM
source /opt/sge/default/common/settings.sh
qstat -f
#
# Check portal: https://dmstorage.svdev.net:8181/dm
#
#####################################################################
#
# Log into portal as dm admin: https://dmstorage.svdev.net:8181/dm
#
# Show users
# Show experiments
# Add dmuser1 to experiment e1
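# (the same assignment can also be done from the command line, e.g. as sveseli@dmstorage:
#  dm-add-user-experiment-role --username dmuser1 --experiment e1 --role=User)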
#
# SCENARIO 2: UPLOAD + METADATA CATALOG
#
# dm@dmstorage: check directory content on the storage node, should be empty
ls -l /opt/DM/data
# dm@dmstorage: check dmuser1 user, note list of groups
id dmuser1
# ssh sveseli@dmstorage: start experiment e1
source /opt/DM/etc/dm.setup.sh
dm-start-experiment --name e1
# dm@dmstorage: check directory content on the storage node
# note that experiment directory permissions are restricted
ls -l /opt/DM/data/ESAF
ls -l /opt/DM/data/ESAF/e1/
# dm@dmstorage: check dmuser1 user, note user belongs to new experiment group
id dmuser1
# sveseli@dmstorage: show there are no experiment files in cataloging service
dm-get-experiment-files --experiment e1
# ssh dm@dmdaq: source setup file, show test data
source /opt/DM/etc/dm.setup.sh
ls -lR /opt/DM/experiments/e1
cat /opt/DM/experiments/e1/file1
# dm@dmdaq: upload data for experiment e1, specify a few arbitrary keys
dm-upload --experiment e1 --data-directory /opt/DM/experiments/e1 ownerUser:JohnC ownerGroup:APSU memo1:ApprovedByNDA memo2:DislikedByGD
# dm@dmstorage: check experiment storage directory content
# note permissions, ownership
ls -l /opt/DM/data/ESAF/e1/
ls -l /opt/DM/data/ESAF/e1/2015/07/09/
# sveseli@dmstorage: get metadata for experiment files from cataloging service
dm-get-experiment-files --experiment e1
dm-get-experiment-file --experiment e1 --file file2 --display-keys=__all__
# sveseli@dmstorage: show metadata updates
dm-update-experiment-file --experiment e1 --file file3 quality:A --display-keys=id,fileName,quality
# sveseli@dmstorage: show metadata search
dm-get-experiment-files --experiment e1 quality:A
dm-get-experiment-files --experiment e1 storageFilePath:2015
#
# SCENARIO 6: DAQ + METADATA CATALOG + SDDS PARAMETERS + HPC PROCESSING
#
# sveseli@dmstorage: add and start experiment mm2
dm-start-experiment --name mm2
# sveseli@dmstorage: get mm2 files, note no files
dm-get-experiment-files --experiment mm2
# dm@dmstorage: show no files in experiment directory
ls -l /opt/DM/data/ESAF
ls -l /opt/DM/data/ESAF/mm2
# dm@dmstorage: show processing script
more /opt/DM/processing/sge_sdds_analysis.sh
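# for orientation, a hypothetical skeleton of such a script (the real
# sge_sdds_analysis.sh is site-specific and not reproduced here):
#   #!/bin/sh
#   #$ -cwd                 # SGE directive: run in the submission directory
#   #$ -j y                 # SGE directive: merge stdout and stderr
#   inputFile=$1            # file path handed over by the processing plugin (assumed)
#   # ... analyze $inputFile with SDDS tools and write a .png image next to it ...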
# dm@dmstorage: tail log file to observe processing
tail -f /opt/DM/var/log/dm.ds-web-service.log
# dm@dmhpc: show qstat
watch -d 'qstat -f'
# dm@dmdaq: start DAQ for experiment mm2, request SDDS parameter
# processing, specify SGE processing script
rm -rf /tmp/data/mm2
mkdir -p /tmp/data/mm2
dm-start-daq --experiment mm2 --data-directory /tmp/data/mm2 processSddsParameters:True sgeJobScript:/opt/DM/processing/sge_sdds_analysis.sh
# dm@dmdaq: copy experiment mm2 files into observed directory, watch qstat
ls -l /opt/DM/experiments/mm2/
cp /opt/DM/experiments/mm2/* /tmp/data/mm2/ && sleep 5 && touch /tmp/data/mm2/* &
tail -f /opt/DM/var/log/dm.daq-web-service.log
# sveseli@dmstorage: get mm2 files, note original + processed files
dm-get-experiment-files --experiment mm2
# dm@dmstorage: show png files in experiment directory
ls -l /opt/DM/data/ESAF/mm2/*.png
# sveseli@dmstorage: get one mm2 .edf file, note SDDS parameters in metadata
dm-get-experiment-file --experiment mm2 --file `dm-get-experiment-files --experiment mm2 --display-keys=fileName | grep -v png | head -1 | cut -f2 -d '='` --display-keys=__all__ --display-format=dict
# sveseli@dmstorage: get mm2 .png files, note parentFile key
dm-get-experiment-files --experiment mm2 fileName:.png --display-keys=fileName,parentFile --display-format=dict
# dm@dmstorage: open one processed file
xdg-open `ls -c1 /opt/DM/data/ESAF/mm2/*.png | head -1`
# dm@dmdaq: stop DAQ for experiment mm2
dm-stop-daq --experiment mm2
# Demo environment consists of two Linux VMs:
# - data acquisition (DAQ) and data storage (DS) nodes
# - CentOS 6.6, 64-bit
# - no shared storage
# - DS node runs database server, Web Portal and DS Web Service
# - DAQ node runs DAQ Web Service
# Machine Preparation
# ===================
# install dependencies (both machines)
yum install -y gcc libgcc expect zlib-devel openssl-devel openldap-devel subversion make sed gawk autoconf automake wget readline-devel
# create system (dm) account on both machines, configure ssh-keys and
# authorized_keys files
# configure /opt/DM area for software installation
mkdir -p /opt/DM
chown -R dm.dm /opt/DM
chmod 755 /opt/DM
# configure (or disable) firewall (both machines)
/etc/init.d/iptables stop
# DM Deployment: DS Machine
# =========================
# Log into dmstorage node and create local DM deployment directory
# in dm user home area
cd /opt/DM
ls -l
# Checkout code as release 0.1
svn co https://subversion.xray.aps.anl.gov/DataManagement/tags/20150421 dm-0.1
# Build support area
cd dm-0.1
make support
# Source setup
source setup.sh
# Create db
make db
# Configure Web Portal
# Note:
# - this needs to be done only during the first portal deployment,
# or after portal has been unconfigured explicitly
# - this step configures DB access
make configure-web-portal
# Deploy Web Portal
# Note:
# - deploys portal war file into glassfish
# - after this step, users can access portal at
# https://dmstorage.svdev.net:8181/dm
make deploy-web-portal
# Deploy DS Web Service
# Note:
# - generates SSL certificates and configuration files
# - after this step, DS web service is accessible at port 22236
# - log files are under DM/var/log
# - configuration files are under DM/etc
# - user setup file is DM/etc/dm.setup.sh
# - service control script is under DM/dm-0.1/etc/init.d
make deploy-ds-web-service
# Check functionality. Open second terminal and log into dmstorage node
# as user sveseli
# Source setup file to get access to DM commands
source /opt/DM/etc/dm.setup.sh
# Attempt to get the list of users as user sveseli; this should result
# in an authorization error
# Note:
# - every command comes with common set of options
dm-get-users -h
dm-get-users --version
dm-get-users
echo $?
# Repeat command, this time as the administrator (dm) account
dm-get-users
# Repeat command, note that a session with the DS service has been established,
# so there are no more password prompts until the session expires
cat ~/.dm/.ds.session.cache
dm-get-users
# DM Deployment: DAQ Machine
# ==========================
# Log into dmdaq node and create local DM deployment directory
# in dm user home area
cd /opt/DM
ls -l
# Checkout code as release 0.1
svn co https://subversion.xray.aps.anl.gov/DataManagement/tags/20150421 dm-0.1
# Build support area
# Note the following:
# - since demo machines are identical, we could simply copy support/dm code
# from the storage node; this is not necessarily the case in general
# - support area and DM code distribution can be shared between DAQ and DS
# nodes
# - support area on the daq node is much lighter (i.e., no need
# for glassfish, etc.)
cd dm-0.1
make support-daq
# Source setup
source setup.sh
# Deploy DAQ Web Service
# Note:
# - requires storage node to be installed
# - generates SSL certificates and configuration files
# - after this step, DAQ web service is accessible at port 33336
# - log files are under DM/var/log
# - configuration files are under DM/etc
# - user setup file is DM/etc/dm.setup.sh
make deploy-daq-web-service
# DM Functionality: DAQ
# =====================
# add new experiment (sveseli@dmstorage)
dm-add-experiment -h
dm-add-experiment --name exp1 --type-id 1 --description test
dm-get-experiments
dm-get-experiment --name exp1
dm-get-experiment --name exp1 --display-keys=__all__
# check directory content on the storage node (dm@dmstorage)
ls -l /opt/DM/data
# start experiment (sveseli@dmstorage)
dm-start-experiment --name exp1
# check directory content on the storage node (dm@dmstorage)
ls -l /opt/DM/data
ls -l /opt/DM/data/ESAF
ls -l /opt/DM/data/ESAF/exp1/
# at this point we can log into the portal to see the experiment that was created
# observe that the start time is entered correctly
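# (the start time can also be checked from the CLI, e.g.:
#  dm-get-experiment --name exp1 --display-keys=__all__)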
# in the first terminal on the daq node, tail log file (dm@dmdaq)
tail -f /opt/DM/var/log/dm.daq-web-service.log
# open second terminal for daq node, login as system (dm) user
# source setup file (dm@dmdaq)
cat /opt/DM/etc/dm.setup.sh
source /opt/DM/etc/dm.setup.sh
# prepare DAQ directory for this experiment (dm@dmdaq)
mkdir -p /tmp/data/exp1
# start DAQ (dm@dmdaq)
dm-start-daq -h
dm-start-daq --experiment exp1 --data-directory /tmp/data/exp1
# create test file in the DAQ directory (daq node)
# observe log file entries, point out file transfer
touch /tmp/data/exp1/file1
echo "Hello there, data management is here" > /tmp/data/exp1/file1
# check directory content on the storage node (dm@dmstorage)
# file1 should be transferred
ls -l /opt/DM/data/ESAF/exp1/
# stop DAQ (dm@dmdaq)
dm-stop-daq -h
dm-stop-daq --experiment exp1
# DM Functionality: Upload
# ========================
# prepare data directory we want to upload (dm@dmdaq)
mkdir -p /tmp/data/exp1/2015/04/21
echo "this is file 2" > /tmp/data/exp1/2015/04/21/file2
echo "this is file 3" > /tmp/data/exp1/2015/04/21/file3
# check directory content on the storage node (dm@dmstorage)
ls -l /opt/DM/data/ESAF/exp1/
# upload data (dm@dmdaq)
dm-upload -h
dm-upload --experiment exp1 --data-directory /tmp/data/exp1
# check directory content on the storage node (dm@dmstorage)
ls -l /opt/DM/data/ESAF/exp1/
ls -l /opt/DM/data/ESAF/exp1/2015/04/21/
cat /opt/DM/data/ESAF/exp1/2015/04/21/file3
# stop experiment (sveseli@dmstorage)
dm-stop-experiment --name exp1
# at this point we can log into the portal to see the modified experiment
# observe that the end time is entered correctly
# Demo environment consists of two Linux VMs:
# - data acquisition (DAQ) and data storage (DS) nodes
# - CentOS 6.6, 64-bit
# - no shared storage
# - DS node runs database server, Web Portal and DS Web Service
# - DAQ node runs DAQ Web Service
# Machine Preparation
# ===================
# install dependencies (both machines)
yum install -y gcc libgcc expect zlib-devel openssl-devel openldap-devel subversion make sed gawk autoconf automake wget readline-devel
# Download globus RPM repo and install gridftp (both machines)
# http://toolkit.globus.org/ftppub/gt6/installers/repo/globus-toolkit-repo-latest.noarch.rpm
yum install globus-gridftp
# Disable requiretty in /etc/sudoers
# Prepare gridftp server to use sshd (dmstorage machine)
globus-gridftp-server-enable-sshftp
# create system (dm) account on both machines, configure ssh-keys and
# authorized_keys files
# create several user accounts (dmstorage machine): dmuser1, dmuser2, dmuser3
# build and install epics base and SDDS/SDDSepics extensions under
# /opt/epics (dmstorage machine)
# configure /opt/DM area for software installation
mkdir -p /opt/DM
chown -R dm.dm /opt/DM
chmod 755 /opt/DM
# configure (or disable) firewall (both machines)
/etc/init.d/iptables stop
# DM Deployment: DS Machine
# =========================
# Log into dmstorage node and create local DM deployment directory
# in dm user home area
cd /opt/DM
ls -l
# Checkout code as release 0.2
svn co https://subversion.xray.aps.anl.gov/DataManagement/tags/20150630 dm-0.2
# Build support area
cd dm-0.2
make support
# Source setup
source setup.sh
# Create db
make db
# Configure Web Portal
# Note:
# - this needs to be done only during the first portal deployment,
# or after portal has been unconfigured explicitly
# - this step configures DB access
# - adds initial DM system user to the DB
make configure-web-portal
# The above step used two new utilities that go directly to the db:
dm-add-user -h
dm-add-user-system-role -h
# Add a few users
dm-add-user --username dmuser1 --first-name Test --last-name User1
dm-add-user --username dmuser2 --first-name Test --last-name User2
dm-add-user --username dmuser3 --first-name Test --last-name User3
# Deploy Web Portal
# Note:
# - deploys portal war file into glassfish
# - after this step, users can access portal at
# https://dmstorage.svdev.net:8181/dm
make deploy-web-portal
# Show no sudo functionality for DM account
sudo -l
# Deploy DS Web Service
# Note:
# - generates SSL certificates and configuration files
# - after this step, DS web service is accessible at port 22236
# - log files are under DM/var/log
# - configuration files are under DM/etc
# - user setup file is DM/etc/dm.setup.sh
# - service control script is under DM/dm-0.2/etc/init.d
make deploy-ds-web-service
# Show sudo functionality for DM account that enables group/permission
# management
sudo -l
# Check functionality. Open second terminal and log into dmstorage node
# as user sveseli
# Source setup file to get access to DM commands
source /opt/DM/etc/dm.setup.sh
# Get user list as administrator (dm) account
dm-get-users
# DM Deployment: DAQ Machine
# ==========================
# Log into dmdaq node and create local DM deployment directory
# in dm user home area
cd /opt/DM
ls -l
# Checkout code as release 0.2
svn co https://subversion.xray.aps.anl.gov/DataManagement/tags/20150630 dm-0.2
# Build support area
# Note the following:
# - since demo machines are identical, we could simply copy support/dm code
# from the storage node; this is not necessarily the case in general
# - support area and DM code distribution can be shared between DAQ and DS
# nodes
# - support area on the daq node is much lighter (i.e., no need
# for glassfish, etc.)
cd dm-0.2
make support-daq
# Source setup
source setup.sh
# Deploy DAQ Web Service
# Note:
# - requires storage node to be installed
# - generates SSL certificates and configuration files
# - after this step, DAQ web service is accessible at port 33336
# - log files are under DM/var/log
# - configuration files are under DM/etc
# - user setup file is DM/etc/dm.setup.sh
make deploy-daq-web-service
# DM Functionality: DAQ
# =====================
# add new experiment and a couple of users (sveseli@dmstorage)
dm-add-experiment --name exp1 --type-id 1 --description test
dm-add-user-experiment-role --username dmuser1 --experiment exp1 --role=User
dm-add-user-experiment-role --username dmuser2 --experiment exp1 --role=User
# Note that dmuser1 and dmuser2 are on the list of experiment users
dm-get-experiments
dm-get-experiment --name exp1 --display-keys=__all__
# check directory content on the storage node (dm@dmstorage)
ls -l /opt/DM/data
# Show that unix account corresponding to dmuser1 has no special groups
# associated with it
id dmuser1
# Show there is no exp1 unix group
grep exp1 /etc/group
# start experiment (sveseli@dmstorage)
dm-start-experiment --name exp1
# Show there is now exp1 unix group
grep exp1 /etc/group
# check directory content on the storage node (dm@dmstorage)
# note that experiment directory permissions are restricted
ls -l /opt/DM/data/ESAF
ls -l /opt/DM/data/ESAF/exp1/
# Check experiment user groups: only dmuser1 and dmuser2 should have the new
# group assigned to them
id dmuser1
id dmuser2
id dmuser3
# in the first terminal on the storage node, tail log file (dm@dmstorage)
tail -f /opt/DM/var/log/dm.ds-web-service.log
# in the first terminal on the daq node, tail log file (dm@dmdaq)
tail -f /opt/DM/var/log/dm.daq-web-service.log
# open second terminal for daq node, login as system (dm) user
# source setup file (dm@dmdaq)
source /opt/DM/etc/dm.setup.sh
# prepare DAQ directory for this experiment (dm@dmdaq)
mkdir -p /tmp/data/exp1
# create test file in the DAQ directory (daq node)
# observe log file entries, point out file transfer
echo "Hello there, data management is here" > /tmp/data/exp1/file1
# check directory content on the storage node (dm@dmstorage)
# file1 should be transferred
ls -l /opt/DM/data/ESAF/exp1/
# upload data (dm@dmdaq)
dm-upload --experiment exp1 --data-directory /tmp/data/exp1
# check directory content on the storage node (dm@dmstorage)
# file1 should be transferred
# note permissions
ls -l /opt/DM/data/ESAF/exp1/
# as root@dmstorage, su into dmuser1 account and try to read data
# should work
cat /opt/DM/data/ESAF/exp1/file1
# as root@dmstorage, su into dmuser3 account and try to read data
# should fail
cat /opt/DM/data/ESAF/exp1/file1
# Demonstrate retries: show config file
vi /opt/DM/etc/dm.daq-web-service.conf
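# the retry behavior is governed by the [FileProcessingManager] section, e.g.:
#   defaultNumberOfRetries=3
#   defaultRetryWaitPeriodInSeconds=60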
# As root@dmdaq, temporarily move rsync
mv /usr/bin/rsync /usr/bin/rsync.orig
# upload new data (dm@dmdaq), observe how transfer fails
echo "Hello there, data management is here again" > /tmp/data/exp1/file1
dm-upload --experiment exp1 --data-directory /tmp/data/exp1
# As root@dmdaq, restore rsync, observe how transfer succeeds
mv /usr/bin/rsync.orig /usr/bin/rsync
# check directory content on the storage node (dm@dmstorage)
# file1 should be transferred
ls -l /opt/DM/data/ESAF/exp1/
# Demonstrate gridftp plugin
# Edit config file as dm@dmdaq, comment out rsync plugin, uncomment gridftp
# plugin; restart service
vi /opt/DM/etc/dm.daq-web-service.conf
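# after the edit, the plugin lines should look roughly like this
# (cf. the dm.daq-web-service.conf template later in this document):
#   #fileProcessor1=dm.common.processing.plugins.rsyncFileTransferPlugin.RsyncFileTransferPlugin(localMd5Sum=True,remoteMd5Sum=False,deleteOriginal=False)
#   fileProcessor1=dm.common.processing.plugins.gridftpFileTransferPlugin.GridftpFileTransferPlugin()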
./etc/init.d/dm-daq-web-service restart
tail -f /opt/DM/var/log/dm.daq-web-service.log
# upload new data (dm@dmdaq), observe how transfer succeeds
echo "Hello there, data management is here yet again" > /tmp/data/exp1/file1
dm-upload --experiment exp1 --data-directory /tmp/data/exp1
# stop experiment (sveseli@dmstorage)
dm-stop-experiment --name exp1
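# Deploy CAT Web Service and set up mongo-express (a web-based MongoDB admin UI)
# under the forever process manager; the /local/DmSystem paths below are site-specific.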
cd /local/DmSystem/dm
source setup.sh
make deploy-cat-web-service
cd /local/DmSystem/support
./bin/install_node.sh
cd /local/DmSystem/support/node/linux-x86_64/bin/node_modules/mongo-express/
cp config.default.js config.js
vi config.js
export PATH=/local/DmSystem/support/node/linux-x86_64/bin:$PATH
../forever/bin/forever start app.js
../forever/bin/forever list
RHEL7 Packages
===============
make
autoconf
expect
gcc
g++
subversion
zlib-devel
openssl-devel
libffi-devel
openldap-devel
readline-devel
ncurses-devel
qt-x11
qt-postgresql
qt-devel
gtk2-devel
Globus Packages
===============
globus-openssl-module
globus-ftp-client
globus-gsi-proxy
globus-gsi-openssl
globus-ftp-control
globus-gass-copy
globus-common-16.8
globus-gass-transfer
globus-io-11.8
globus-gss-assist
globus-gsi-cert
globus-xio-popen
globus-xio-gsi
globus-gsi-credential
globus-callout-3.15
globus-gsi-sysconfig
globus-gsi-callback
globus-xio-5.14
globus-gssapi-gsi
globus-gass-copy
globus-gsi-proxy
globus-gssapi-error
build
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = APSDataManagement
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
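# Typical usage: any Sphinx builder name works as a target, e.g.
#   make html    # renders the documentation under build/html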
# -*- coding: utf-8 -*-
#
# APS Data Management documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 23 09:20:39 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.coverage',
              'sphinx.ext.ifconfig',
              'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'APS Data Management'
copyright = u'2017, APS/SDM'
author = u'APS/SDM'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'APSDataManagementdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'APSDataManagement.tex', u'APS Data Management Documentation',
     u'APS/SDM', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'apsdatamanagement', u'APS Data Management Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'APSDataManagement', u'APS Data Management Documentation',
     author, 'APSDataManagement', 'One line description of project.',
     'Miscellaneous'),
]
.. automodule:: dm.aps_bss.api
.. currentmodule:: dm.aps_bss.api
ApsBssApi
---------
.. autoclass:: dm.aps_bss.api.apsBssApi.ApsBssApi()
   :show-inheritance:
   :members: __init__, listRuns, getCurrentRun, listBeamlineProposals, getBeamlineProposal
.. automodule:: dm.daq_web_service.api
.. currentmodule:: dm.daq_web_service.api
ExperimentDaqApi
-----------------
.. autoclass:: dm.daq_web_service.api.experimentDaqApi.ExperimentDaqApi()
   :show-inheritance:
   :members: __init__, startDaq, stopDaq, listDaqs, getDaqInfo, upload, stopUpload, listUploads, getUploadInfo, listProcessingPlugins
.. APS Data Management documentation master file, created by
   sphinx-quickstart on Thu Feb 23 09:20:39 2017.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.
Welcome to APS Data Management's documentation!
===============================================
The `dm` package contains python APIs for accessing Data Management services.
.. toctree::
   :maxdepth: 4
   :caption: Contents:

   dm.daq_web_service.api
   dm.aps_bss.api
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
[WebService]
serviceHost=0.0.0.0
servicePort=44436
sslCertFile=DM_INSTALL_DIR/etc/ssl/cat-web-service.crt
sslKeyFile=DM_INSTALL_DIR/etc/ssl/cat-web-service.key
sslCaCertFile=DM_INSTALL_DIR/etc/ssl/cacert.pem
stationName=DM_STATION_NAME
[AuthorizationPrincipalManager]
principalRetriever=dm.ds_web_service.service.auth.dsAuthPrincipalRetriever.DsAuthPrincipalRetriever()
#principalRetriever=DbPrincipalRetriever()
#principalRetriever=NoOpPrincipalRetriever()
principalAuthenticator1=CryptedPasswordPrincipalAuthenticator()
principalAuthenticator2=LdapPasswordPrincipalAuthenticator(serverUrl='ldaps://phoebusldap.aps.anl.gov:636', dnFormat='uid=%s,ou=people,o=aps.anl.gov,dc=aps,dc=anl,dc=gov')
[MongoDbManager]
mongoDbName=dm
mongoDbUri=mongodb://localhost:27017
mongoDbUser=dm
mongoDbPasswordFile=DM_INSTALL_DIR/etc/dm.db.passwd
[DsRestApiFactory]
username=DM_SYSTEM_USER
passwordFile=DM_INSTALL_DIR/etc/DM_SYSTEM_USER.system.passwd
host=DM_DS_WEB_SERVICE_HOST
port=DM_DS_WEB_SERVICE_PORT
protocol=DM_WEB_SERVICE_PROTOCOL
# Available logger levels: debug, info, warn, error, critical
[ConsoleLogging]
handler=ConsoleLoggingHandler(sys.stdout,)
level=debug
format=%(asctime)s,%(msecs)003d %(levelname)s %(filename)s:%(lineno)d %(process)d: %(message)s
dateFormat=%Y/%m/%d %H:%M:%S
[FileLogging]
handler=TimedRotatingFileLoggingHandler('DM_INSTALL_DIR/var/log/cat-web-service.log')
level=debug
format=%(asctime)s,%(msecs)003d %(levelname)s %(filename)s:%(lineno)d %(process)d: %(message)s
dateFormat=%Y/%m/%d %H:%M:%S
[WebService]
serviceHost=0.0.0.0
servicePort=33336
sslCertFile=DM_INSTALL_DIR/etc/ssl/daq-web-service.crt
sslKeyFile=DM_INSTALL_DIR/etc/ssl/daq-web-service.key
sslCaCertFile=DM_INSTALL_DIR/etc/ssl/cacert.pem
stationName=DM_STATION_NAME
[AuthorizationPrincipalManager]
principalRetriever=dm.ds_web_service.service.auth.dsAuthPrincipalRetriever.DsAuthPrincipalRetriever()
#principalRetriever=DbPrincipalRetriever()
#principalRetriever=NoOpPrincipalRetriever()
principalAuthenticator1=CryptedPasswordPrincipalAuthenticator()
principalAuthenticator2=LdapPasswordPrincipalAuthenticator(serverUrl='ldaps://phoebusldap.aps.anl.gov:636', dnFormat='uid=%s,ou=people,o=aps.anl.gov,dc=aps,dc=anl,dc=gov')
#[SingleSignOnManager]
#sessionManager=dm.ds_web_service.service.auth.dsSessionManager.DsSessionManager()
#sessionTimeoutInSeconds=3600
[FileSystemObserver]
# Minimum file processing delay since last update
minFileProcessingDelayInSeconds=10
fileSystemEventTimeoutInSeconds=10
fileSystemObserverAgent=dm.daq_web_service.service.impl.watchdogFileSystemObserverAgent.WatchdogFileSystemObserverAgent()
#fileSystemObserverAgent=dm.daq_web_service.service.impl.ftpFileSystemObserverAgent.FtpFileSystemObserverAgent('dmdaq', 2811)
[FileProcessingManager]
numberOfProcessingThreads=5
defaultNumberOfRetries=3
defaultRetryWaitPeriodInSeconds=60
fileProcessor1=dm.common.processing.plugins.rsyncFileTransferPlugin.RsyncFileTransferPlugin(localMd5Sum=True,remoteMd5Sum=False,deleteOriginal=False)
#fileProcessor1=dm.common.processing.plugins.gridftpFileTransferPlugin.GridftpFileTransferPlugin()
#fileProcessor2=dm.common.processing.plugins.rsyncFileTransferPlugin.RsyncFileTransferPlugin(dest=\ffdfdsf')
fileProcessor2=dm.common.processing.plugins.mongoDbFileCatalogPlugin.MongoDbFileCatalogPlugin()
fileProcessor3=dm.daq_web_service.service.impl.dsProcessFileNotificationPlugin.DsProcessFileNotificationPlugin()
[DsRestApiFactory]
username=DM_SYSTEM_USER
passwordFile=DM_INSTALL_DIR/etc/DM_SYSTEM_USER.system.passwd
host=DM_DS_WEB_SERVICE_HOST
port=DM_DS_WEB_SERVICE_PORT
protocol=DM_WEB_SERVICE_PROTOCOL
[MongoDbManager]
mongoDbName=dm
mongoDbUri=mongodb://localhost:27017
mongoDbUser=dm
mongoDbPasswordFile=/home/dm/etc/dm.db.passwd
# Available logger levels: debug, info, warn, error, critical
[ConsoleLogging]
handler=ConsoleLoggingHandler(sys.stdout,)
level=debug
format=%(asctime)s,%(msecs)003d %(levelname)s %(filename)s:%(lineno)d %(process)d: %(message)s
dateFormat=%Y/%m/%d %H:%M:%S
[FileLogging]
handler=TimedRotatingFileLoggingHandler('DM_INSTALL_DIR/var/log/daq-web-service.log')
level=debug
format=%(asctime)s,%(msecs)003d %(levelname)s %(filename)s:%(lineno)d %(process)d: %(message)s
dateFormat=%Y/%m/%d %H:%M:%S
DM_DB_NAME=dm
DM_DB_USER=dm
DM_DB_HOST=127.0.0.1
DM_DB_PORT=11136
DM_DB_ADMIN_USER=postgres
DM_DB_ADMIN_HOSTS="127.0.0.1"
DM_DB_SCRIPTS_DIR=
DM_SYSTEM_USER=dm
DM_STORAGE_DIR=
DM_CONTEXT_ROOT=dm
DM_WEB_SERVICE_PROTOCOL=https
DM_DS_WEB_SERVICE_HOST=DM_HOSTNAME
DM_DS_WEB_SERVICE_PORT=22236
DM_DAQ_WEB_SERVICE_HOST=DM_HOSTNAME
DM_DAQ_WEB_SERVICE_PORT=33336
DM_CAT_WEB_SERVICE_HOST=DM_HOSTNAME
DM_CAT_WEB_SERVICE_PORT=44436
DM_PROC_WEB_SERVICE_HOST=DM_HOSTNAME
DM_PROC_WEB_SERVICE_PORT=55536
DM_SOFTWARE_VERSION="1.1 (DM_DATE)"
#
# OpenSSL example configuration file.
# This is mostly being used for generation of certificate requests.
#
# This definition stops the following lines choking if HOME isn't
# defined.
HOME = .
RANDFILE = $ENV::HOME/.rnd
# Uncomment out to enable OpenSSL configuration see config(3)
# openssl_conf = openssl_init
# To use this configuration file with the "-extfile" option of the
# "openssl x509" utility, name here the section containing the
# X.509v3 extensions to use:
# extensions =
# (Alternatively, use a configuration file that has only
# X.509v3 extensions in its main [= default] section.)
[openssl_init]
# Extra OBJECT IDENTIFIER info:
oid_section = new_oids
alg_section = algs
[ new_oids ]
# We can add new OIDs in here for use by any config aware application
# Add a simple OID like this:
# shortname=Long Object Identifier Name, 1.2.3.4
# Or use config file substitution like this:
# testoid2=OID2 LONG NAME, ${testoid1}.5.6, OTHER OID
[ algs ]
# Algorithm configuration options. Currently just fips_mode
fips_mode = no
####################################################################
[ ca ]
default_ca = CA_default # The default ca section
####################################################################
[ CA_default ]
dir = $ENV::DM_INSTALL_DIR/etc/CA # Where everything is kept
certs = $dir/certs # Where the issued certs are kept
crl_dir = $dir/crl # Where the issued crl are kept
database = $dir/index.txt # database index file.
#unique_subject = no # Set to 'no' to allow creation of
# several certificates with the same subject.
new_certs_dir = $dir/newcerts # default place for new certs.
certificate = $dir/cacert.pem # The CA certificate
serial = $dir/serial # The current serial number
crlnumber = $dir/crlnumber # the current crl number
# must be commented out to leave a V1 CRL
crl = $dir/crl.pem # The current CRL
private_key = $dir/private/cakey.pem# The private key
RANDFILE = $dir/private/.rand # private random number file
x509_extensions = usr_cert # The extensions to add to the cert
# Comment out the following two lines for the "traditional"
# (and highly broken) format.
name_opt = ca_default # Subject Name options
cert_opt = ca_default # Certificate field options
# Extension copying option: use with caution.
# copy_extensions = copy
# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs
# so this is commented out by default to leave a V1 CRL.
# crlnumber must also be commented out to leave a V1 CRL.
# crl_extensions = crl_ext
default_days = 3650 # how long to certify for
default_crl_days= 30 # how long before next CRL
default_md = sha512 # which md to use.
preserve = no # keep passed DN ordering
# A few different ways of specifying how similar the request should look
# For type CA, the listed attributes must be the same, and the optional
# and supplied fields are just that :-)
policy = policy_match
# For the CA policy
[ policy_match ]
countryName = match
stateOrProvinceName = optional
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
# For the 'anything' policy
# At this point in time, you must list all acceptable 'object'
# types.
[ policy_anything ]
countryName = optional
#stateOrProvinceName = optional
#localityName = optional
organizationName = optional
#organizationalUnitName = optional
commonName = supplied
emailAddress = optional
####################################################################
[ req ]
default_bits = 2048
default_md = sha512
default_keyfile = privkey.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
x509_extensions = v3_ca # The extensions to add to the self signed cert
# Passwords for private keys if not present they will be prompted for
# input_password = secret
# output_password = secret
# This sets a mask for permitted string types. There are several options.
# default: PrintableString, T61String, BMPString.
# pkix : PrintableString, BMPString.
# utf8only: only UTF8Strings.
# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).
# MASK:XXXX a literal mask value.
# WARNING: current versions of Netscape crash on BMPStrings or UTF8Strings
# so use this option with caution!
# we use PrintableString+UTF8String mask so if pure ASCII texts are used
# the resulting certificates are compatible with Netscape
string_mask = MASK:0x2002
# req_extensions = v3_req # The extensions to add to a certificate request
[ req_distinguished_name ]
countryName = Country Name (2 letter code)
countryName_default = US
countryName_min = 2
countryName_max = 2
#stateOrProvinceName = State or Province Name (full name)
#stateOrProvinceName_default =
#localityName = Locality Name (eg, city)
#localityName_default =
0.organizationName = Organization Name (eg, company)
0.organizationName_default = Argonne National Laboratory
# we can do this but it is not needed normally :-)
1.organizationName = Second Organization Name (eg, company)
1.organizationName_default = Advanced Photon Source
organizationalUnitName = Organizational Unit Name (eg, section)
organizationalUnitName_default = AES/SSG
commonName = Common Name (eg, your name or your server\'s hostname)
commonName_max = 64
emailAddress = Email Address
emailAddress_max = 64
# SET-ex3 = SET extension number 3
[ req_attributes ]
challengePassword = A challenge password
challengePassword_min = 4
challengePassword_max = 20
unstructuredName = An optional company name
[ usr_cert ]
# These extensions are added when 'ca' signs a request.
# This goes against PKIX guidelines but some CAs do it and some software
# requires this to avoid interpreting an end user certificate as a CA.
basicConstraints=CA:FALSE
# Here are some examples of the usage of nsCertType. If it is omitted
# the certificate can be used for anything *except* object signing.
# This is OK for an SSL server.
# nsCertType = server
# For an object signing certificate this would be used.
# nsCertType = objsign
# For normal client use this is typical
# nsCertType = client, email
# and for everything including object signing:
# nsCertType = client, email, objsign
# This is typical in keyUsage for a client certificate.
# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
# This will be displayed in Netscape's comment listbox.
nsComment = "OpenSSL Generated Certificate"
# PKIX recommendations harmless if included in all certificates.
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer
# This stuff is for subjectAltName and issuerAltname.
# Import the email address.
# subjectAltName=email:copy
# An alternative to produce certificates that aren't
# deprecated according to PKIX.
# subjectAltName=email:move
# Copy subject details
# issuerAltName=issuer:copy
#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
#nsBaseUrl
#nsRevocationUrl
#nsRenewalUrl
#nsCaPolicyUrl
#nsSslServerName
[ server ]
basicConstraints=CA:FALSE
nsCertType = server
nsComment = "OpenSSL Generated Server Certificate"
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer:always
[ v3_req ]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
[ v3_ca ]
# Extensions for a typical CA
# PKIX recommendation.
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer:always
# This is what PKIX recommends but some broken software chokes on critical
# extensions.
basicConstraints = critical,CA:true
# So we do this instead.
#basicConstraints = CA:true
# Key usage: this is typical for a CA certificate. However since it will
# prevent it being used as a test self-signed certificate it is best
# left out by default.
# keyUsage = cRLSign, keyCertSign
# Some might want this also
# nsCertType = sslCA, emailCA
# Include email address in subject alt name: another PKIX recommendation
# subjectAltName=email:copy
# Copy issuer details
# issuerAltName=issuer:copy
# DER hex encoding of an extension: beware experts only!
# obj=DER:02:03
# Where 'obj' is a standard or added object
# You can even override a supported extension:
# basicConstraints= critical, DER:30:03:01:01:FF
[ crl_ext ]
# CRL extensions.
# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.
# issuerAltName=issuer:copy
authorityKeyIdentifier=keyid:always,issuer:always
[ proxy_cert_ext ]
# These extensions should be added when creating a proxy certificate
# This goes against PKIX guidelines but some CAs do it and some software
# requires this to avoid interpreting an end user certificate as a CA.
basicConstraints=CA:FALSE
# Here are some examples of the usage of nsCertType. If it is omitted
# the certificate can be used for anything *except* object signing.
# This is OK for an SSL server.
# nsCertType = server
# For an object signing certificate this would be used.
# nsCertType = objsign
# For normal client use this is typical
# nsCertType = client, email
# and for everything including object signing:
# nsCertType = client, email, objsign
# This is typical in keyUsage for a client certificate.
# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
# This will be displayed in Netscape's comment listbox.
nsComment = "OpenSSL Generated Certificate"
# PKIX recommendations harmless if included in all certificates.
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer:always
# This stuff is for subjectAltName and issuerAltname.
# Import the email address.
# subjectAltName=email:copy
# An alternative to produce certificates that aren't
# deprecated according to PKIX.
# subjectAltName=email:move
# Copy subject details
# issuerAltName=issuer:copy
#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
#nsBaseUrl
#nsRevocationUrl
#nsRenewalUrl
#nsCaPolicyUrl
#nsSslServerName
# This really needs to be in place for it to be a proxy certificate.
proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo
## DM user sudo functions
## All strings starting with DM_* have to be replaced with actual values
Host_Alias HOST=DM_HOSTNAME
User_Alias USER=DM_SYSTEM_UNIX_ACCOUNT
Cmnd_Alias SETFACL=/usr/bin/setfacl -m group\:*\:rx DM_STORAGE_DIR/*
Cmnd_Alias USERMOD=/usr/sbin/usermod -a -G * *
Cmnd_Alias GROUPADD=/usr/sbin/groupadd *
Cmnd_Alias CHOWN=/bin/chown \:* *
Cmnd_Alias CHOWN_R=/bin/chown -R \:* *
Cmnd_Alias GPASSWD=/usr/bin/gpasswd * * *
Cmnd_Alias NSCD=/usr/sbin/nscd -i *
USER HOST = (root) NOPASSWD: SETFACL,USERMOD,GROUPADD,CHOWN,CHOWN_R,GPASSWD,NSCD
DM_DB_NAME=dm_dev
DM_DB_USER=dm_dev
DM_DB_HOST=127.0.0.1
DM_DB_PORT=11136
DM_DB_ADMIN_USER=postgres
DM_DB_ADMIN_HOSTS="127.0.0.1"
DM_DB_SCRIPTS_DIR=
DM_SYSTEM_USER=dm
DM_STORAGE_DIR=
DM_CONTEXT_ROOT=dm_dev
DM_WEB_SERVICE_PROTOCOL=https
DM_DS_WEB_SERVICE_HOST=DM_HOSTNAME
DM_DS_WEB_SERVICE_PORT=22237
DM_DAQ_WEB_SERVICE_HOST=DM_HOSTNAME
DM_DAQ_WEB_SERVICE_PORT=33337
DM_CAT_WEB_SERVICE_HOST=DM_HOSTNAME
DM_CAT_WEB_SERVICE_PORT=44437
DM_PROC_WEB_SERVICE_HOST=DM_HOSTNAME
DM_PROC_WEB_SERVICE_PORT=55537
DM_SOFTWARE_VERSION="Development Snapshot (DM_DATE)"
[WebService]
serviceHost=0.0.0.0
servicePort=22236
sslCertFile=DM_INSTALL_DIR/etc/ssl/ds-web-service.crt
sslKeyFile=DM_INSTALL_DIR/etc/ssl/ds-web-service.key
sslCaCertFile=DM_INSTALL_DIR/etc/ssl/cacert.pem
[AuthorizationPrincipalManager]
principalRetriever=DbPrincipalRetriever()
#principalRetriever=NoOpPrincipalRetriever()
principalAuthenticator1=CryptedPasswordPrincipalAuthenticator()
principalAuthenticator2=LdapPasswordPrincipalAuthenticator(serverUrl='ldaps://dmid-vm.xray.aps.anl.gov:636', dnFormat='uid=%s,ou=people,o=aps.anl.gov,dc=aps,dc=anl,dc=gov')
[ExperimentManager]
storageDirectory=DM_STORAGE_DIR
storageId=extrepid
manageStoragePermissions=True
#platformUtility=dm.common.utility.linuxUtility.LinuxUtility()
platformUtility=dm.common.utility.ldapLinuxPlatformUtility.LdapLinuxPlatformUtility('ldaps://dmid-vm.xray.aps.anl.gov:636', 'uid=DM_SYSTEM_UNIX_ACCOUNT,ou=People,o=aps.anl.gov,dc=aps,dc=anl,dc=gov', 'DM_INSTALL_DIR/etc/DM_SYSTEM_UNIX_ACCOUNT.ldap.passwd', groupDnFormat='cn=%s,ou=DM,ou=Group,o=aps.anl.gov,dc=aps,dc=anl,dc=gov', minGidNumber=66000)
[LdapLinuxPlatformUtility]
refreshAuthFilesCommand=
[DbManager]
dbSchema=DM_DB_NAME
dbUser=DM_DB_NAME
dbPasswordFile=DM_INSTALL_DIR/etc/DM_DB_NAME.db.passwd
[MongoDbManager]
mongoDbName=dm
mongoDbUri=mongodb://localhost:27017
mongoDbUser=dm
mongoDbPasswordFile=DM_INSTALL_DIR/etc/DM_DB_NAME.db.passwd
[FileProcessingManager]
numberOfProcessingThreads=3
defaultNumberOfRetries=3
defaultRetryWaitPeriodInSeconds=60
#fileProcessor1=dm.common.processing.plugins.mongoDbFileCatalogPlugin.MongoDbFileCatalogPlugin()
#fileProcessor2=dm.common.processing.plugins.sddsParameterProcessingPlugin.SddsParameterProcessingPlugin()
#fileProcessor3=dm.common.processing.plugins.scriptProcessingPlugin.ScriptProcessingPlugin()
#fileProcessor4=dm.common.processing.plugins.sgeJobSubmissionPlugin.SgeJobSubmissionPlugin('/opt/sge')
# Available logger levels: debug, info, warn, error, critical
[ConsoleLogging]
handler=ConsoleLoggingHandler(sys.stdout,)
level=debug
format=%(asctime)s,%(msecs)003d %(levelname)s %(filename)s:%(lineno)d %(process)d: %(message)s
dateFormat=%Y/%m/%d %H:%M:%S
[FileLogging]
handler=TimedRotatingFileLoggingHandler('DM_INSTALL_DIR/var/log/ds-web-service.log')
level=debug
format=%(asctime)s,%(msecs)003d %(levelname)s %(filename)s:%(lineno)d %(process)d: %(message)s
dateFormat=%Y/%m/%d %H:%M:%S