diff --git a/doc/demo/apsu-20150709/demo_notes.sv.txt b/doc/demo/apsu-20150709/demo_notes.sv.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4698e29fcb19bc17ea0ff2cf60d0002ddb1dfb26
--- /dev/null
+++ b/doc/demo/apsu-20150709/demo_notes.sv.txt
@@ -0,0 +1,219 @@
+# Need 1 terminal on the DAQ node, 1 terminal on the HPC node, and 2 terminals
+# on the DS node
+
+#
+# SCENARIO 1: BASIC UPLOAD
+#
+
+# ssh -X dm@dmstorage: start services
+cd /opt/DM/dev
+source setup.sh
+./etc/init.d/dm-postgresql start
+./etc/init.d/dm-glassfish start
+./etc/init.d/dm-mongodb start
+./etc/init.d/dm-ds-web-service start
+./etc/init.d/dm-cat-web-service start
+
+# ssh -X dm@dmdaq: start services
+cd /opt/DM/dev
+source setup.sh
+./etc/init.d/dm-daq-web-service start
+
+# ssh -X dm@dmhpc: start services, check NFS
+cd /opt/DM/dev
+source setup.sh
+./etc/init.d/dm-daq-web-service start
+ls -l /net/dmstorage/opt/DM
+
+#####################################################################
+
+# dm@dmstorage: check directory content on the storage node
+ls -l /opt/DM/data
+
+# ssh sveseli@dmstorage: add and start experiment e1
+source /opt/DM/etc/dm.setup.sh
+dm-add-experiment --name e1 --type-id 1
+dm-start-experiment --name e1
+
+# dm@dmstorage: check directory content on the storage node 
+# note that experiment directory permissions are restricted
+ls -l /opt/DM/data/ESAF
+ls -l /opt/DM/data/ESAF/e1/
+
+# ssh dm@dmdaq: source setup file, show test data
+source /opt/DM/etc/dm.setup.sh
+ls -lR /opt/DM/experiments/e1
+cat /opt/DM/experiments/e1/file1 
+
+# dm@dmdaq: upload data for experiment e1
+dm-upload --experiment e1 --data-directory /opt/DM/experiments/e1
+
+# dm@dmstorage: check experiment storage directory content
+# note permissions, ownership
+ls -l /opt/DM/data/ESAF/e1/
+ls -l /opt/DM/data/ESAF/e1/2015/07/09/
+
+#
+# SCENARIO 2: UPLOAD + METADATA CATALOG 
+#
+
+# sveseli@dmstorage: get metadata for experiment files from cataloging service
+dm-get-experiment-files --experiment e1
+dm-get-experiment-file --experiment e1 --file file2 --display-keys=__all__
+
+# dm@dmdaq: upload data for experiment e1, this time specify extra keys
+dm-upload --experiment e1 --data-directory /opt/DM/experiments/e1 ownerUser:JohnC ownerGroup:APSU memo1:ApprovedByNDA memo2:DislikedByGD
+
+# sveseli@dmstorage: get metadata for file 2 again
+dm-get-experiment-file --experiment e1 --file file2 --display-keys=__all__
+
+# sveseli@dmstorage: show metadata updates 
+dm-update-experiment-file --experiment e1 --file file3 quality:A --display-keys=id,fileName,quality
+
+# sveseli@dmstorage: show metadata search
+dm-get-experiment-files --experiment e1 quality:A
+dm-get-experiment-files --experiment e1 storageFilePath:2015
+
+#
+# SCENARIO 3: UPLOAD + METADATA CATALOG + SDDS PARAMETERS
+#
+
+# sveseli@dmstorage: add and start experiment mm1 
+dm-add-experiment --name mm1 --type-id 1
+dm-start-experiment --name mm1
+
+# dm@dmdaq: upload data for experiment mm1, and request SDDS parameter
+# processing
+ls -lR /opt/DM/experiments/mm1
+dm-upload --experiment mm1 --data-directory /opt/DM/experiments/mm1 ownerUser:JohnC ownerGroup:APSU processSddsParameters:True
+
+# sveseli@dmstorage: get mm1 files, observe SDDS parameters 
+dm-get-experiment-files --experiment mm1 
+dm-get-experiment-files --experiment mm1 --display-keys=__all__ --display-format=dict
+
+# dm@dmstorage: compare with sddsprintout (permissions do not allow the
+# sveseli account to access the file)
+export PATH=$PATH:/opt/epics/extensions/bin/linux-x86_64/
+sddsprintout -parameters /opt/DM/data/ESAF/mm1/hallProbeScan-M1Proto-000000072-0009-000000.edf
+
+#
+# SCENARIO 4: UPLOAD + METADATA CATALOG + SDDS PARAMETERS + SCRIPT PROCESSING 
+#
+
+# dm@dmstorage: show processing script
+cat /opt/DM/processing/find_sdds_row_count.sh
+/opt/DM/processing/find_sdds_row_count.sh /opt/DM/data/ESAF/mm1/hallProbeScan-M1Proto-000000072-0009-000000.edf
+
+# sveseli@dmstorage: get mm1 files, note no key processingScriptOutput
+dm-get-experiment-files --experiment mm1 --display-keys=fileName,processingScriptOutput 
+
+# dm@dmdaq: upload data for experiment mm1, request SDDS parameter
+# processing, specify processing script
+dm-upload --experiment mm1 --data-directory /opt/DM/experiments/mm1 processSddsParameters:True processingScript:/opt/DM/processing/find_sdds_row_count.sh
+
+# sveseli@dmstorage: get mm1 files, note present key processingScriptOutput
+dm-get-experiment-files --experiment mm1 --display-keys=fileName,processingScriptOutput 
+
+#
+# SCENARIO 5: UPLOAD + METADATA CATALOG + SDDS PARAMETERS + HPC PROCESSING 
+#
+
+# dm@dmstorage: show processing script
+more /opt/DM/processing/sge_sdds_analysis.sh
+
+# dm@dmstorage: show no png files in experiment directory
+ls -l /opt/DM/data/ESAF/mm1/*.png
+
+# dm@dmhpc: show empty home directory
+cd
+ls -l
+
+# dm@dmhpc: show qstat
+source /opt/sge/default/common/settings.sh
+qstat -f
+watch -d 'qstat -f'
+
+# sveseli@dmstorage: get mm1 files, note only 1 file
+dm-get-experiment-files --experiment mm1 
+
+# dm@dmdaq: upload data for experiment mm1, request SDDS parameter
+# processing, specify SGE processing script
+dm-upload --experiment mm1 --data-directory /opt/DM/experiments/mm1 processSddsParameters:True sgeJobScript:/opt/DM/processing/sge_sdds_analysis.sh
+
+# sveseli@dmstorage: get mm1 files, note 2 files
+dm-get-experiment-files --experiment mm1 
+
+# sveseli@dmstorage: get mm1 .png files, note parentFile key
+dm-get-experiment-files --experiment mm1 fileName:.png --display-keys=__all__
+
+# dm@dmhpc: show SGE output in home directory
+ls -l
+
+# dm@dmstorage: open processed file
+xdg-open /opt/DM/data/ESAF/mm1/hallProbeScan-M1Proto-000000072-0009-000000.edf.png
+
+#
+# SCENARIO 6: DAQ + METADATA CATALOG + SDDS PARAMETERS + HPC PROCESSING 
+#
+
+# sveseli@dmstorage: add and start experiment mm2 
+dm-add-experiment --name mm2 --type-id 1
+dm-start-experiment --name mm2
+
+# sveseli@dmstorage: get mm2 files, note no files
+dm-get-experiment-files --experiment mm2 
+
+# dm@dmstorage: show no png files in experiment directory
+ls -l /opt/DM/data/ESAF/mm2/*.png
+
+# dm@dmstorage: tail log file to observe processing
+tail -f /opt/DM/var/log/dm.ds-web-service.log
+
+# dm@dmdaq: start DAQ for experiment mm2, request SDDS parameter
+# processing, specify SGE processing script
+rm -rf /tmp/data/mm2
+mkdir -p /tmp/data/mm2
+dm-start-daq --experiment mm2 --data-directory /tmp/data/mm2 processSddsParameters:True sgeJobScript:/opt/DM/processing/sge_sdds_analysis.sh
+
+# dm@dmhpc: show qstat
+watch -d 'qstat -f'
+
+# dm@dmdaq: copy experiment mm2 files into observed directory, watch qstat
+ls -l /opt/DM/experiments/mm2/
+cp /opt/DM/experiments/mm2/* /tmp/data/mm2/ && sleep 5 && touch /tmp/data/mm2/* &
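+# (the trailing touch refreshes modification times after the copy finishes,
+# presumably so the DAQ directory monitor picks the files up as complete)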
+tail -f /opt/DM/var/log/dm.daq-web-service.log
+
+
+# sveseli@dmstorage: get mm2 files, note original + processed files
+dm-get-experiment-files --experiment mm2 
+
+# dm@dmstorage: show png files in experiment directory
+ls -l /opt/DM/data/ESAF/mm2/*.png
+
+# dm@dmdaq: stop DAQ for experiment mm2
+dm-stop-daq --experiment mm2 
+
+#
+# SCENARIO 7: DATASET DEFINITION
+#
+
+# sveseli@dmstorage: add metadata for a couple of experiment e2 files
+# with different keys
+dm-add-experiment-file --experiment e2 --file x1 status:good
+dm-add-experiment-file --experiment e2 --file y1 status:bad
+dm-get-experiment-files --experiment e2 --display-keys=fileName,status
+
+# sveseli@dmstorage: add dataset metadata 
+dm-add-experiment-dataset --experiment e2 --dataset d1 status:g.*
+
+# sveseli@dmstorage: get dataset files, note only one file matches
+dm-get-experiment-dataset-files --experiment e2 --dataset d1
+
+# sveseli@dmstorage: add metadata for another e2 file that
+# should match the dataset constraint
+dm-add-experiment-file --experiment e2 --file x2 status:great
+dm-get-experiment-files --experiment e2 --display-keys=fileName,status
+
+# sveseli@dmstorage: get dataset files, note two files match
+dm-get-experiment-dataset-files --experiment e2 --dataset d1
+
diff --git a/doc/demo/apsu-20150709/machine_prep_notes.sv.txt b/doc/demo/apsu-20150709/machine_prep_notes.sv.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1be9beefb2919f2b5f193ce6ca41547cb962f3a2
--- /dev/null
+++ b/doc/demo/apsu-20150709/machine_prep_notes.sv.txt
@@ -0,0 +1,147 @@
+# Demo environment consists of three Linux VMs:
+#     - data acquisition (DAQ), data storage (DS), and SGE cluster (HPC) nodes
+#     - CentOS 6.6, 64-bit
+#     - no shared storage
+#     - DS node runs PostgreSQL database server, Web Portal, DS Web Service, 
+#       CAT Web Service, MongoDB server
+#     - DAQ node runs DAQ Web Service
+#     - HPC node runs SGE cluster
+
+# Machine Preparation
+# ===================
+
+# install dependencies (all machines)
+yum install -y gcc libgcc expect zlib-devel openssl-devel openldap-devel subversion make sed gawk autoconf automake wget readline-devel
+
+# Download globus RPM repo and install gridftp (both machines)
+# http://toolkit.globus.org/ftppub/gt6/installers/repo/globus-toolkit-repo-latest.noarch.rpm
+yum install globus-gridftp
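+# note: the repo RPM above has to be installed first; a hedged sketch:
+# rpm -Uvh http://toolkit.globus.org/ftppub/gt6/installers/repo/globus-toolkit-repo-latest.noarch.rpm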
+
+# Disable requiretty in /etc/sudoers
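+# one way to comment it out non-interactively (sketch; editing via visudo is safer):
+sed -i 's/^Defaults[[:space:]]*requiretty/#&/' /etc/sudoers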
+
+# Prepare gridftp server to use sshd (dmstorage machine)
+globus-gridftp-server-enable-sshftp
+
+# create system (dm) account on both machines, configure ssh-keys and 
+# authorized_keys files
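+# for example (sketch only; run as root, hostnames are illustrative):
+useradd -m dm
+su - dm -c 'ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa'
+su - dm -c 'ssh-copy-id dm@dmstorage'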
+
+# create several user accounts (dmstorage machine): dmuser1, dmuser2, dmuser3
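+# for example (sketch):
+for u in dmuser1 dmuser2 dmuser3; do useradd -m $u; done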
+
+# build and install epics base and SDDS/SDDSepics extensions under 
+# /opt/epics (dmstorage machine)
+# build SDDS python under /opt/epics/extensions/src/SDDS/python/
+# copy sdds.py into /opt/DM/support/python/linux-x86_64/lib/python2.7/
+# copy /opt/epics/extensions/src/SDDS/python/O.linux-x86_64/sddsdatamodule.so
+# into /opt/DM/support/python/linux-x86_64/lib/python2.7/lib-dynload/
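+# the two copy steps spelled out (sketch; assumes sdds.py ends up directly
+# under the SDDS python build directory):
+cp /opt/epics/extensions/src/SDDS/python/sdds.py /opt/DM/support/python/linux-x86_64/lib/python2.7/
+cp /opt/epics/extensions/src/SDDS/python/O.linux-x86_64/sddsdatamodule.so /opt/DM/support/python/linux-x86_64/lib/python2.7/lib-dynload/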
+
+# export /opt/DM to dmhpc node
+# yum install nfs-utils
+# edit /etc/exports and add /opt/DM 192.168.100.8(rw,sync)
+# exportfs -a
+# restart nfs
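+# the export steps spelled out (sketch; CentOS 6 service names):
+yum install -y nfs-utils
+echo '/opt/DM 192.168.100.8(rw,sync)' >> /etc/exports
+exportfs -a
+service nfs restart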
+
+# install sge on hpc machine, add dmstorage as submission node, 
+# copy /opt/sge to dmstorage
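+# adding the submission host and copying the SGE install, for example
+# (sketch; assumes the SGE admin environment is sourced on dmhpc):
+qconf -as dmstorage
+scp -r /opt/sge dm@dmstorage:/opt/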
+
+# configure /opt/DM area for software installation
+mkdir -p /opt/DM
+chown -R dm:dm /opt/DM
+chmod 755 /opt/DM
+
+# configure (or disable) firewall (both machines)
+/etc/init.d/iptables stop
+
+# DM Deployment: DS Machine
+# =========================
+
+# Log into dmstorage node as the dm user and change into the local DM
+# deployment directory
+cd /opt/DM
+ls -l
+
+# Check out code as release 0.2
+svn co https://subversion.xray.aps.anl.gov/DataManagement/trunk dev
+
+# Build support area
+cd dev
+make support
+
+# Source setup 
+source setup.sh
+
+# Create db
+make db
+
+# Configure Web Portal
+# Note:
+#   - this needs to be done only during the first portal deployment,
+#     or after portal has been unconfigured explicitly
+#   - this step configures DB access
+#   - adds initial DM system user to the DB
+make configure-web-portal
+
+# Add a few users
+#dm-add-user --username dmuser1 --first-name Test --last-name User1
+#dm-add-user --username dmuser2 --first-name Test --last-name User2
+#dm-add-user --username dmuser3 --first-name Test --last-name User3
+
+# Deploy Web Portal
+# Note:
+#   - deploys portal war file into glassfish
+#   - after this step, users can access portal at
+#     https://dmstorage.svdev.net:8181/dm
+make deploy-web-portal
+
+# Deploy DS Web Service
+# Note:
+#   - generates SSL certificates and configuration files 
+#   - after this step, DS web service is accessible at port 22236 
+#   - log files are under DM/var/log
+#   - configuration files are under DM/etc
+#   - user setup file is DM/etc/dm.setup.sh
+#   - service control script is under DM/dm-0.2/etc/init.d 
+make deploy-ds-web-service
+
+# Check functionality: open a second terminal and log into the dmstorage node
+# as user sveseli
+# Source setup file to get access to DM commands
+source /opt/DM/etc/dm.setup.sh
+
+# Get user list as administrator (dm) account 
+dm-get-users
+
+# DM Deployment: DAQ Machine/HPC Machine
+# ======================================
+
+# Log into dmdaq node as the dm user and change into the local DM
+# deployment directory
+cd /opt/DM
+ls -l
+
+# Check out code as release 0.2
+svn co https://subversion.xray.aps.anl.gov/DataManagement/trunk dev
+
+# Build support area 
+# Note the following:
+#   - since demo machines are identical, we could simply copy support/dm code
+#     from the storage node; this is not necessarily the case in general
+#   - support area and DM code distribution can be shared between DAQ and DS
+#     nodes
+#   - support area on the daq node is much lighter (i.e., no need
+#     for glassfish, etc.)
+cd dev
+make support-daq
+
+# Source setup 
+source setup.sh
+
+# Deploy DAQ Web Service
+# Note:
+#   - requires storage node to be installed 
+#   - generates SSL certificates and configuration files 
+#   - after this step, DAQ web service is accessible at port 33336 
+#   - log files are under DM/var/log
+#   - configuration files are under DM/etc
+#   - user setup file is DM/etc/dm.setup.sh
+make deploy-daq-web-service
+