First working version + cleanups.
author František Dvořák <valtri@civ.zcu.cz>
Tue, 19 Jan 2016 16:56:48 +0000 (17:56 +0100)
committer František Dvořák <valtri@civ.zcu.cz>
Tue, 19 Jan 2016 16:56:48 +0000 (17:56 +0100)
25 files changed:
.gitignore [new file with mode: 0644]
cfg_forkys.sh [deleted file]
cfg_nat.sh [deleted file]
config/perun-hador/process-hadoop_base.sh [new file with mode: 0755]
config/perun-hador/process-hadoop_hdfs.sh [new file with mode: 0755]
config/perun-nosec/process-hadoop_base.sh [new file with mode: 0755]
config/perun-nosec/process-hadoop_hdfs.sh [new file with mode: 0755]
config/puppet.conf.hador [new file with mode: 0644]
docker.sh
machines/Makefile [new file with mode: 0644]
machines/hador-gen-virt-frontend.sh [new file with mode: 0755]
machines/hador-gen-virt-master.sh [new file with mode: 0755]
machines/hador-gen-virt-node.sh [moved from hador-gen-virt.sh with 75% similarity]
machines/orig/cfg_hostname.sh [new file with mode: 0644]
machines/orig/install_phase1.sh [new file with mode: 0644]
machines/public/hador20-1.sh [moved from hador20-1.sh with 100% similarity]
machines/public/hador20-2.sh [moved from hador20-2.sh with 100% similarity]
machines/public/hador21-1.sh [moved from hador21-1.sh with 100% similarity]
machines/public/hador21-2.sh [moved from hador21-2.sh with 100% similarity]
machines/public/hador22-1.sh [moved from hador22-1.sh with 100% similarity]
machines/public/hador22-2.sh [moved from hador22-2.sh with 100% similarity]
machines/public/hador23-1.sh [moved from hador23-1.sh with 100% similarity]
machines/public/hador23-2.sh [moved from hador23-2.sh with 100% similarity]
machines/public/hador24-1.sh [moved from hador24-1.sh with 100% similarity]
machines/public/hador24-2.sh [moved from hador24-2.sh with 100% similarity]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..2fcdce6
--- /dev/null
@@ -0,0 +1,2 @@
+machines/*.sh
+!machines/*-gen-*.sh
diff --git a/cfg_forkys.sh b/cfg_forkys.sh
deleted file mode 100644 (file)
index 8cd48ec..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-export FACTER_fqdn="forkys.zcu.cz"
-export FACTER_hostname=`echo $FACTER_fqdn | sed 's/\..*//g'`
-export FACTER_ipaddress="147.228.1.147"
-export FACTER_netmask="255.255.255.0"
-export FACTER_ipaddress6="2001:718:1801:1001::1:147"
-export FACTER_netmask6="64"
-export FACTER_gw6="2001:718:1801:1001::1:1"
-
-export FACTER_gw="147.228.1.1"
-export FACTER_macaddress="52:54:00:12:20:12"
-export XENBR="br0"
-export SIZE_ROOTFS="15G"
-export SIZE_SWAP="512M"
-export SIZE_MEM="1024"
-export SIZE_CPU="2"
-
diff --git a/cfg_nat.sh b/cfg_nat.sh
deleted file mode 100644 (file)
index 3a91a1a..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-export FACTER_fqdn="localmachine.vagrant"
-export FACTER_hostname=`echo $FACTER_fqdn | sed 's/\..*//g'`
-export FACTER_ipaddress="dhcp"
-#export FACTER_netmask="255.255.255.0"
-#export FACTER_ipaddress6="2001:718:1801:1001::1:147"
-#export FACTER_netmask6="64"
-#export FACTER_gw6="2001:718:1801:1001::1:1"
-
-#export FACTER_gw="147.228.1.1"
-export FACTER_macaddress="52:54:00:12:20:12"
-#export XENBR="br0"
-export SIZE_ROOTFS="15G"
-export SIZE_SWAP="512M"
-export SIZE_MEM="1024"
-export SIZE_CPU="4"
-
diff --git a/config/perun-hador/process-hadoop_base.sh b/config/perun-hador/process-hadoop_base.sh
new file mode 100755 (executable)
index 0000000..4fc26ab
--- /dev/null
@@ -0,0 +1,85 @@
+#! /bin/bash
+
+PROTOCOL_VERSION='3.0.0'
+
+function process() {
+       FROM_PERUN="${WORK_DIR}/hadoop_base"
+
+       KEYTAB='/etc/security/keytab/hbase.service.keytab'
+       KRB5CCNAME="FILE:${WORK_DIR}/krb5cc_perun_hbase"
+       PRINCIPAL="hbase/`hostname -f`@ICS.MUNI.CZ"
+
+       RESERVED='NAMESPACE\|default\|hbase\|hive\|oozie'
+
+       I_HBASE_CREATED=(0 'User ${login} will be created.')
+       I_HBASE_DELETED=(0 'User ${login} will be deleted.')
+       I_HBASE_OK=(0 'HBase shell OK.')
+       E_KINIT_FAILED=(1 'Kinit on HBase master failed')
+       E_HBASE_NAMESPACE_LIST=(2 'Cannot get list of namespaces')
+       E_HBASE_TABLE_LIST=(3 'Cannot get list of tables from ${login}')
+       E_HBASE_FAILED=(4 'HBase shell failed')
+       E_EMPTY_LIST=(5 'The list is empty!')
+       E_EMPTY_USERNAME=(6 'Empty username')
+
+       create_lock
+       chown hbase "${WORK_DIR}"
+       su hbase -s /bin/bash -p -c "kinit -k -t '${KEYTAB}' '${PRINCIPAL}'" || log_msg E_KINIT_FAILED
+
+       # get list from HBase
+       echo list_namespace | su hbase -s /bin/bash -p -c "hbase shell -n" >"${WORK_DIR}/out.txt" 2>/dev/null || log_msg E_HBASE_NAMESPACE_LIST
+       head -n -3 "${WORK_DIR}/out.txt" | sort > "${WORK_DIR}/hbase-list.txt"
+
+       # get list from Perun
+        [ -s "${FROM_PERUN}" ] || log_msg E_EMPTY_LIST
+       sort "${FROM_PERUN}" > "${WORK_DIR}/perun-list.txt"
+
+       # compare and action
+       rm -f /var/tmp/perun-hbase-delete.sh "${WORK_DIR}/add.hbase" "${WORK_DIR}/del.hbase"
+       diff "${WORK_DIR}/hbase-list.txt" "${WORK_DIR}/perun-list.txt" | while read op login; do
+               case "$op" in
+                       '>')
+                               # add user
+                               [ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+
+                               (echo "create_namespace '${login}'"
+                                echo "grant '${login}', 'RWXCA', '@${login}'"
+                                echo "grant '${login}', 'C'"
+                                echo
+                               ) >> "${WORK_DIR}/add.hbase"
+                               log_msg I_HBASE_CREATED
+                       ;;
+                       '<')
+                               # delete user (no real delete for now)
+                               [ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+
+                               if echo "${login}" | grep -q "${RESERVED}"; then
+                                       continue
+                               fi
+
+                               # a) delete tables
+                               echo "list '${login}:.*'" | hbase shell -n 2>/dev/null > "${WORK_DIR}/out.txt" || log_msg E_HBASE_TABLE_LIST
+                               cat "${WORK_DIR}/out.txt" | awk '/^$/ {o=1;next} /.*/ && o {print $0}' > "${WORK_DIR}/hbase-user-tables.txt"
+                               while read table; do
+                                       echo "disable '${table}'"
+                                       echo "drop '${table}'"
+                               done < "${WORK_DIR}/hbase-user-tables.txt" >> "${WORK_DIR}/del.hbase"
+                               # b) delete user
+                               (echo "revoke '${login}', '@${login}'"
+                                echo "revoke '${login}', 'C'"
+                                echo
+                               ) >> "${WORK_DIR}/del.hbase"
+                               log_msg I_HBASE_DELETED
+                       ;;
+               esac
+       done
+
+       if [ -s "${WORK_DIR}/add.hbase" ]; then
+               cat "${WORK_DIR}/add.hbase" | su hbase -s /bin/bash -p -c "hbase shell -n" >/dev/null 2>&1 || log_msg E_HBASE_FAILED
+               log_msg I_HBASE_OK
+       fi
+       if [ -s "${WORK_DIR}/del.hbase" ]; then
+               mv "${WORK_DIR}/del.hbase" /var/tmp/perun-hbase-delete.sh
+       fi
+
+       kdestroy
+}
diff --git a/config/perun-hador/process-hadoop_hdfs.sh b/config/perun-hador/process-hadoop_hdfs.sh
new file mode 100755 (executable)
index 0000000..b385cdd
--- /dev/null
@@ -0,0 +1,60 @@
+#! /bin/bash
+
+PROTOCOL_VERSION='3.0.0'
+
+function process() {
+       FROM_PERUN="${WORK_DIR}/hadoop_hdfs"
+
+       KEYTAB='/etc/security/keytab/nn.service.keytab'
+       KRB5CCNAME="FILE:${WORK_DIR}/krb5cc_perun_nn"
+       PRINCIPAL="nn/`hostname -f`@ICS.MUNI.CZ"
+
+       RESERVED='hbase\|hive\|oozie'
+
+       I_HDFS_CREATED=(0 'Directory /user/${login} created.')
+       I_HDFS_DELETED=(0 'Directory /user/${login} deleted.')
+       E_KINIT_FAILED=(1 'Kinit on NameNode failed')
+       E_HDFS_LIST_FAILED=(2 'Cannot get list of directories from HDFS')
+       E_HDFS_MKDIR_FAILED=(2 'Cannot create directory /user/${login}')
+       E_HDFS_PERMS_FAILED=(2 'Cannot set permissions on /user/${login}')
+       E_EMPTY_LIST=(3 'The list is empty!')
+       E_EMPTY_USERNAME=(4 'Empty username')
+
+       create_lock
+       chown hdfs "${WORK_DIR}"
+       su hdfs -s /bin/bash -p -c "kinit -k -t '${KEYTAB}' '${PRINCIPAL}'" || log_msg E_KINIT_FAILED
+
+       # get list from Hadoop HDFS
+       su hdfs -s /bin/bash -p -c "hdfs dfs -ls /user" >"${WORK_DIR}/hdfs-dirs.txt" || log_msg E_HDFS_LIST_FAILED
+       tail -n +2 "${WORK_DIR}/hdfs-dirs.txt" | sed 's,.* /user/,,' | sort > "${WORK_DIR}/hdfs-list.txt"
+
+       # get list from Perun
+        [ -s "${FROM_PERUN}" ] || log_msg E_EMPTY_LIST
+       sort "${FROM_PERUN}" > "${WORK_DIR}/perun-list.txt"
+
+       # compare and action
+       rm -f /var/tmp/perun-hdfs-delete.sh
+       diff "${WORK_DIR}/hdfs-list.txt" "${WORK_DIR}/perun-list.txt" | while read op login; do
+               case "$op" in
+                       '>')
+                               # add user
+                               [ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+                               su hdfs -s /bin/bash -p -c "hdfs dfs -mkdir '/user/${login}'" || log_msg E_HDFS_MKDIR_FAILED
+                               su hdfs -s /bin/bash -p -c "hdfs dfs -chown '${login}:hadoop' '/user/${login}'" || log_msg E_HDFS_PERMS_FAILED
+                               su hdfs -s /bin/bash -p -c "hdfs dfs -chmod 0750 '/user/${login}'" || log_msg E_HDFS_PERMS_FAILED
+                               log_msg I_HDFS_CREATED
+                       ;;
+                       '<')
+                               # delete user (no real delete for now)
+                               [ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+                               if echo "${login}" | grep -q "${RESERVED}"; then
+                                       continue
+                               fi
+                               echo "su hdfs -s /bin/bash -p -c \"hdfs dfs -rm -r '/user/${login}'\"" >> /var/tmp/perun-hdfs-delete.sh
+                               #log_msg I_HDFS_DELETED
+                       ;;
+               esac
+       done
+
+       kdestroy
+}
diff --git a/config/perun-nosec/process-hadoop_base.sh b/config/perun-nosec/process-hadoop_base.sh
new file mode 100755 (executable)
index 0000000..abfdf3d
--- /dev/null
@@ -0,0 +1,78 @@
+#! /bin/bash
+
+PROTOCOL_VERSION='3.0.0'
+
+function process() {
+       FROM_PERUN="${WORK_DIR}/hadoop_base"
+
+       RESERVED='NAMESPACE\|default\|hbase\|hive\|oozie'
+
+       I_HBASE_CREATED=(0 'User ${login} will be created.')
+       I_HBASE_DELETED=(0 'User ${login} will be deleted.')
+       I_HBASE_OK=(0 'HBase shell OK.')
+       E_KINIT_FAILED=(1 'Kinit on HBase master failed')
+       E_HBASE_NAMESPACE_LIST=(2 'Cannot get list of namespaces')
+       E_HBASE_TABLE_LIST=(3 'Cannot get list of tables from ${login}')
+       E_HBASE_FAILED=(4 'HBase shell failed')
+       E_EMPTY_LIST=(5 'The list is empty!')
+       E_EMPTY_USERNAME=(6 'Empty username')
+
+       create_lock
+       chown hbase "${WORK_DIR}"
+
+       # get list from HBase
+       echo list_namespace | su hbase -s /bin/bash -p -c "hbase shell -n" >"${WORK_DIR}/out.txt" 2>/dev/null || log_msg E_HBASE_NAMESPACE_LIST
+       head -n -3 "${WORK_DIR}/out.txt" | sort > "${WORK_DIR}/hbase-list.txt"
+
+       # get list from Perun
+        [ -s "${FROM_PERUN}" ] || log_msg E_EMPTY_LIST
+       sort "${FROM_PERUN}" > "${WORK_DIR}/perun-list.txt"
+
+       # compare and action
+       rm -f /var/tmp/perun-hbase-delete.sh "${WORK_DIR}/add.hbase" "${WORK_DIR}/del.hbase"
+       diff "${WORK_DIR}/hbase-list.txt" "${WORK_DIR}/perun-list.txt" | while read op login; do
+               case "$op" in
+                       '>')
+                               # add user
+                               [ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+
+                               (echo "create_namespace '${login}'"
+                                echo "grant '${login}', 'RWXCA', '@${login}'"
+                                echo "grant '${login}', 'C'"
+                                echo
+                               ) >> "${WORK_DIR}/add.hbase"
+                               log_msg I_HBASE_CREATED
+                       ;;
+                       '<')
+                               # delete user (no real delete for now)
+                               [ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+
+                               if echo "${login}" | grep -q "${RESERVED}"; then
+                                       continue
+                               fi
+
+                               # a) delete tables
+                               echo "list '${login}:.*'" | hbase shell -n 2>/dev/null > "${WORK_DIR}/out.txt" || log_msg E_HBASE_TABLE_LIST
+                               cat "${WORK_DIR}/out.txt" | awk '/^$/ {o=1;next} /.*/ && o {print $0}' > "${WORK_DIR}/hbase-user-tables.txt"
+                               while read table; do
+                                       echo "disable '${table}'"
+                                       echo "drop '${table}'"
+                               done < "${WORK_DIR}/hbase-user-tables.txt" >> "${WORK_DIR}/del.hbase"
+                               # b) delete user
+                               (echo "revoke '${login}', '@${login}'"
+                                echo "revoke '${login}', 'C'"
+                                echo
+                               ) >> "${WORK_DIR}/del.hbase"
+                               log_msg I_HBASE_DELETED
+                       ;;
+               esac
+       done
+
+       if [ -s "${WORK_DIR}/add.hbase" ]; then
+               cat "${WORK_DIR}/add.hbase" | su hbase -s /bin/bash -p -c "hbase shell -n" >/dev/null 2>&1 || log_msg E_HBASE_FAILED
+               log_msg I_HBASE_OK
+       fi
+       if [ -s "${WORK_DIR}/del.hbase" ]; then
+               mv "${WORK_DIR}/del.hbase" /var/tmp/perun-hbase-delete.sh
+       fi
+}
diff --git a/config/perun-nosec/process-hadoop_hdfs.sh b/config/perun-nosec/process-hadoop_hdfs.sh
new file mode 100755 (executable)
index 0000000..33502e5
--- /dev/null
@@ -0,0 +1,53 @@
+#! /bin/bash
+
+PROTOCOL_VERSION='3.0.0'
+
+function process() {
+       FROM_PERUN="${WORK_DIR}/hadoop_hdfs"
+
+       RESERVED='hbase\|hive\|oozie\|spark'
+
+       I_HDFS_CREATED=(0 'Directory /user/${login} created.')
+       I_HDFS_DELETED=(0 'Directory /user/${login} deleted.')
+       E_KINIT_FAILED=(1 'Kinit on NameNode failed')
+       E_HDFS_LIST_FAILED=(2 'Cannot get list of directories from HDFS')
+       E_HDFS_MKDIR_FAILED=(2 'Cannot create directory /user/${login}')
+       E_HDFS_PERMS_FAILED=(2 'Cannot set permissions on /user/${login}')
+       E_EMPTY_LIST=(3 'The list is empty!')
+       E_EMPTY_USERNAME=(4 'Empty username')
+
+       create_lock
+       chown hdfs "${WORK_DIR}"
+
+       # get list from Hadoop HDFS
+       su hdfs -s /bin/bash -p -c "hdfs dfs -ls /user" >"${WORK_DIR}/hdfs-dirs.txt" || log_msg E_HDFS_LIST_FAILED
+       tail -n +2 "${WORK_DIR}/hdfs-dirs.txt" | sed 's,.* /user/,,' | sort > "${WORK_DIR}/hdfs-list.txt"
+
+       # get list from Perun
+        [ -s "${FROM_PERUN}" ] || log_msg E_EMPTY_LIST
+       sort "${FROM_PERUN}" > "${WORK_DIR}/perun-list.txt"
+
+       # compare and action
+       rm -f /var/tmp/perun-hdfs-delete.sh
+       diff "${WORK_DIR}/hdfs-list.txt" "${WORK_DIR}/perun-list.txt" | while read op login; do
+               case "$op" in
+                       '>')
+                               # add user
+                               [ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+                               su hdfs -s /bin/bash -p -c "hdfs dfs -mkdir '/user/${login}'" || log_msg E_HDFS_MKDIR_FAILED
+                               su hdfs -s /bin/bash -p -c "hdfs dfs -chown '${login}:hadoop' '/user/${login}'" || log_msg E_HDFS_PERMS_FAILED
+                               su hdfs -s /bin/bash -p -c "hdfs dfs -chmod 0750 '/user/${login}'" || log_msg E_HDFS_PERMS_FAILED
+                               log_msg I_HDFS_CREATED
+                       ;;
+                       '<')
+                               # delete user (no real delete for now)
+                               [ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+                               if echo "${login}" | grep -q "${RESERVED}"; then
+                                       continue
+                               fi
+                               echo "su hdfs -s /bin/bash -p -c \"hdfs dfs -rm -r '/user/${login}'\"" >> /var/tmp/perun-hdfs-delete.sh
+                               #log_msg I_HDFS_DELETED
+                       ;;
+               esac
+       done
+}
diff --git a/config/puppet.conf.hador b/config/puppet.conf.hador
new file mode 100644 (file)
index 0000000..b7e53be
--- /dev/null
@@ -0,0 +1,15 @@
+[main]
+logdir=/opt/puppet3-omnibus/embedded/var/log/puppet
+vardir=/opt/puppet3-omnibus/embedded/var/lib/puppet
+ssldir=/opt/puppet3-omnibus/embedded/var/lib/puppet/ssl
+rundir=/opt/puppet3-omnibus/embedded/var/run/puppet
+factpath=$vardir/lib/facter
+templatedir=$confdir/templates
+server=myriad7.zcu.cz
+
+[master]
+# These are needed when the puppetmaster is run by passenger
+# and can safely be removed if webrick is used.
+ssl_client_header = SSL_CLIENT_S_DN
+ssl_client_verify_header = SSL_CLIENT_VERIFY
+
diff --git a/docker.sh b/docker.sh
index 8defd42..7017c88 100755 (executable)
--- a/docker.sh
+++ b/docker.sh
 #
 # 3) /etc/default/docker
 #
-# DOCKER_OPTS="--cluster-advertise=br0:2376 --cluster-store=zk://hador-c1.ics.muni.cz,hador-c2.ics.muni.cz,hador.ics.muni.cz/docker-hador"
+# DOCKER_OPTS="--cluster-advertise=ib0:2376 --cluster-store=zk://hador-c1.ics.muni.cz,hador-c2.ics.muni.cz,hador.ics.muni.cz/docker-hador"
+#
+# DOCKER_OPTS="--bridge=br0 --cluster-advertise=ib0:2376 --cluster-store=zk://hador-c1.ics.muni.cz,hador-c2.ics.muni.cz,hador.ics.muni.cz/docker-hador --default-gateway=147.251.9.1 --fixed-cidr=147.251.9.220/31 --ip-masq=false --iptables=false --ipv6=false"
 #
 # 4) docker overlay network created
 #
 # docker network create -d overlay vxlan
 #
 
-#DOCKER_network='vxlan'
-DOCKER_network='bridge'
+DOCKER_network='vxlan'
+#DOCKER_network='bridge'
 
 
 # get the first free network device
@@ -73,13 +75,13 @@ if [ x"${1}" = x"init" ]; then
     i=$((i+1))
     mkdir /data/${i}${DISK_SUBDIR} 2>/dev/null || :
   done
-  mkdir /scratch${DISK_SUBDIR} 2>/dev/null || :
+  mkdir /scratch${SCRATCH_SUBDIR} 2>/dev/null || :
 
   if [ ! -f ~/.ssh/id_rsa_docker ]; then
     ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa_docker
     cp -p ~/.ssh/id_rsa_docker.pub ~/.ssh/authorized_keys_docker
-    if [ -f `dirname $0`/authorized_keys ]
-      cat `dirname $0`/authorized_keys >> ~/.ssh/authorized_keys_docker
+    if [ -f `dirname $0`/config/authorized_keys ]; then
+      cat `dirname $0`/config/authorized_keys >> ~/.ssh/authorized_keys_docker
     fi
   fi
 fi
@@ -88,14 +90,12 @@ for d in ${DISKS}; do
   i=$((i+1))
   ARGS="${ARGS} -v /data/${i}${DISK_SUBDIR}:/data/${i}"
 done
-ARGS="${ARGS} -v /scratch:/scratch"
+ARGS="${ARGS} -v /scratch${SCRATCH_SUBDIR}:/scratch"
 
 docker run -itd \
-  -v ~/.k5login:/root/.k5login \
   -v ~/.ssh/authorized_keys_docker:/root/.ssh/authorized_keys \
-  -v /etc/krb5.conf:/etc/krb5.conf \
   --net=${DOCKER_network} \
-  --restart=on-failure:5 \
+  --restart=on-failure:0 \
   ${ARGS} \
   "$@" \
   valtri/hadoop:puppetlabs \
@@ -109,7 +109,7 @@ fi
 # ==== public IPv6 ====
 dev=veth`devname`
 # this is not persistent, let's create a script
-cat << EOF > /var/run/docker/${FACTER_hostname}.sh
+cat << EOF > /etc/docker/net-${FACTER_hostname}.sh
 mkdir -p /var/run/netns || :
 find -L /etc/ssl/certs -type l -delete || :
 pid=\`docker inspect -f '{{.State.Pid}}' ${FACTER_hostname}\`
@@ -119,9 +119,9 @@ ip link add ${dev}a type veth peer name ${dev}b
 brctl addif ${XENBR} ${dev}a
 ip link set ${dev}a up
 ip link set ${dev}b netns \$pid
-ip netns exec \$pid ip link set dev ${dev}b name eth6
-ip netns exec \$pid ip link set eth6 address ${FACTER_macaddress}
-ip netns exec \$pid ip link set eth6 up
+ip netns exec \$pid ip link set dev ${dev}b name public6
+ip netns exec \$pid ip link set public6 address ${FACTER_macaddress}
+ip netns exec \$pid ip link set public6 up
 EOF
-chmod +x /var/run/docker/${FACTER_hostname}.sh
-sh -xe /var/run/docker/${FACTER_hostname}.sh
+chmod +x /etc/docker/net-${FACTER_hostname}.sh
+sh -xe /etc/docker/net-${FACTER_hostname}.sh
diff --git a/machines/Makefile b/machines/Makefile
new file mode 100644 (file)
index 0000000..b14e44a
--- /dev/null
@@ -0,0 +1,11 @@
+all:
+       for j in 1 2; do \
+               for i in $(shell seq 1 24); do \
+                       ./hador-gen-virt-node.sh $${i} $${j} > hador$${i}-$${j}.sh; \
+               done; \
+               ./hador-gen-virt-master.sh 1 $${j} > hador-c1-$${j}.sh; \
+               ./hador-gen-virt-master.sh 2 $${j} > hador-c2-$${j}.sh; \
+               ./hador-gen-virt-frontend.sh $${j} > hador-$${j}.sh; \
+       done
+
+.PHONY: all
diff --git a/machines/hador-gen-virt-frontend.sh b/machines/hador-gen-virt-frontend.sh
new file mode 100755 (executable)
index 0000000..a1884d1
--- /dev/null
@@ -0,0 +1,26 @@
+#! /bin/sh
+
+NVIRT=$1
+
+NIP=0
+if [ -z "${NVIRT}" ]; then
+  NVIRT=1
+fi
+
+NIP_HEX=`printf "%02x\n" ${NIP}`
+NVIRT_HEX=`printf "%02x\n" ${NVIRT}`
+
+cat <<EOF
+export FACTER_fqdn="hador-${NVIRT}.ics.muni.cz"
+export FACTER_hostname=\`echo \$FACTER_fqdn | sed 's/\..*//g'\`
+export FACTER_macaddress="02:93:0a:00:${NVIRT_HEX}:${NIP_HEX}"
+
+export XENBR="br0"
+export SIZE_ROOTFS="300G"
+export SIZE_SWAP="0"
+export SIZE_MEM="61440"
+export SIZE_CPU="8"
+export DISKS=""
+export DISK_SUBDIR=""
+export SCRATCH_SUBDIR=""
+EOF
diff --git a/machines/hador-gen-virt-master.sh b/machines/hador-gen-virt-master.sh
new file mode 100755 (executable)
index 0000000..5f33eba
--- /dev/null
@@ -0,0 +1,31 @@
+#! /bin/sh
+
+N=$1
+NVIRT=$2
+
+if [ -z "${N}" ]; then
+  echo "Usage: $0 <MACHINE_NUMBER> [<VIRTUAL_NUMBER>]"
+  exit 1
+fi
+NIP=`expr ${N} + 192`
+if [ -z "${NVIRT}" ]; then
+  NVIRT=1
+fi
+
+NIP_HEX=`printf "%02x\n" ${NIP}`
+NVIRT_HEX=`printf "%02x\n" ${NVIRT}`
+
+cat <<EOF
+export FACTER_fqdn="hador-c${N}-${NVIRT}.ics.muni.cz"
+export FACTER_hostname=\`echo \$FACTER_fqdn | sed 's/\..*//g'\`
+export FACTER_macaddress="02:93:0a:00:${NVIRT_HEX}:${NIP_HEX}"
+
+export XENBR="br0"
+export SIZE_ROOTFS="300G"
+export SIZE_SWAP="0"
+export SIZE_MEM="61440"
+export SIZE_CPU="8"
+export DISKS=""
+export DISK_SUBDIR="/virt-${NVIRT}"
+export SCRATCH_SUBDIR="/virt-${NVIRT}"
+EOF
diff --git a/hador-gen-virt.sh b/machines/hador-gen-virt-node.sh
similarity index 75%
rename from hador-gen-virt.sh
rename to machines/hador-gen-virt-node.sh
index fd97e79..40559b7 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#! /bin/sh
 
 N=$1
 NVIRT=$2
@@ -16,9 +16,9 @@ NVIRT_HEX=`printf "%02x\n" ${NVIRT}`
 
 cat <<EOF
 export FACTER_fqdn="hador${N}-${NVIRT}.ics.muni.cz"
-export FACTER_hostname=\`echo $FACTER_fqdn | sed 's/\..*//g'\`
-export FACTER_macaddress="02:42:0a:00:${NVIRT_HEX}:${N_HEX}"
+export FACTER_hostname=\`echo \$FACTER_fqdn | sed 's/\..*//g'\`
+export FACTER_macaddress="02:93:0a:00:${NVIRT_HEX}:${N_HEX}"
+
 export XENBR="br0"
 export SIZE_ROOTFS="300G"
 export SIZE_SWAP="0"
@@ -26,4 +26,5 @@ export SIZE_MEM="61440"
 export SIZE_CPU="8"
 export DISKS="/dev/sda /dev/sdb /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh /dev/sdk /dev/sdl /dev/sdm /dev/sdn"
 export DISK_SUBDIR="/virt-${NVIRT}"
+export SCRATCH_SUBDIR="/virt-${NVIRT}"
 EOF
diff --git a/machines/orig/cfg_hostname.sh b/machines/orig/cfg_hostname.sh
new file mode 100644 (file)
index 0000000..7917ae7
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+export FACTER_fqdn="hostname.domain.com"
+export FACTER_hostname=`echo $FACTER_fqdn | sed 's/\..*//g'`
+export FACTER_ipaddress="10.7.0.12"
+export FACTER_netmask="255.255.255.0"
+export FACTER_gw="10.7.0.1"
+export FACTER_broadcast="10.7.0.255"
+export FACTER_macaddress="00:16:3e:07:00:12"
+export XENBR="br1"
+export SIZE_ROOTFS="15G"
+export SIZE_SWAP="512M"
+export SIZE_MEM="1024"
+export SIZE_CPU="2"
+
diff --git a/machines/orig/install_phase1.sh b/machines/orig/install_phase1.sh
new file mode 100644 (file)
index 0000000..4f00cce
--- /dev/null
@@ -0,0 +1,107 @@
+#!/bin/sh
+
+if [ -z $1 ]; then
+       echo "ERROR: no config parameter"
+       exit 1
+fi
+. ./$1 || exit 1
+
+VG=vg00
+
+VOL=/dev/mapper/${VG}-${FACTER_hostname}
+if [ -e $VOL ]; then
+	echo "ERROR: volume $VOL already exists"
+       exit 1
+fi
+
+if [ -z $FACTER_fqdn ]; then
+       echo "ERROR: no facter fqdn settings found"
+       exit 1
+fi
+if [ -z $FACTER_hostname ]; then
+       echo "ERROR: no facter hostname settings found"
+       exit 1
+fi
+
+lvcreate -L${SIZE_ROOTFS} -n ${FACTER_hostname} ${VG}
+mkfs.xfs /dev/mapper/${VG}-${FACTER_hostname}
+lvcreate -L${SIZE_SWAP} -n ${FACTER_hostname}sw ${VG}
+mkswap /dev/mapper/${VG}-${FACTER_hostname}sw
+
+mkdir -p /mnt/target
+mount /dev/mapper/${VG}-${FACTER_hostname} /mnt/target || exit 1
+cd /mnt/target || exit 1
+tar xf /usr/local/store/wheezy02.tar --strip-components 1 --numeric-owner
+
+#mount -o bind /proc proc
+#mount -o bind /dev dev
+#mount -o bind /sys sys
+#cp -v /etc/resolv.conf etc
+#cp -vr /etc/apt etc
+#chroot .
+#apt-get update
+#yes | aptitude install puppet xfsprogs git
+
+cat << __EOF__ > /mnt/target/etc/network/interfaces
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+        address $FACTER_ipaddress
+        netmask $FACTER_netmask
+        broadcast $FACTER_broadcast
+        gateway $FACTER_gw
+
+__EOF__
+
+cat << __EOF__ > /mnt/target/etc/fstab
+# /etc/fstab: static file system information.
+#
+# Use 'blkid' to print the universally unique identifier for a
+# device; this may be used with UUID= as a more robust way to name devices
+# that works even if disks are added and removed. See fstab(5).
+#
+# <file system> <mount point>   <type>  <options>       <dump>  <pass>
+proc            /proc           proc    defaults        0       0
+/dev/xvda      /               xfs     defaults        0       1
+/dev/xvdb      none            swap    sw              0       0
+__EOF__
+
+perl -pi -e 's#1:2345:respawn:/sbin/getty 38400 tty1#1:2345:respawn:/sbin/getty --noclear 38400 hvc0#' /mnt/target/etc/inittab
+HOTOVO=1
+while [ $HOTOVO -ne 0 ]; do
+       chroot . passwd
+       HOTOVO=$?
+done
+
+#exit
+#umount proc dev sys
+cd /mnt
+umount target
+
+cat << __EOF__ > /etc/xen/boot/${FACTER_hostname}
+#----------------------------------------------------------------------------
+# Standard variables
+kernel = "/boot/vmlinuz-3.2.0-4-amd64"
+ramdisk = "/boot/initrd.img-3.2.0-4-amd64"
+memory = ${SIZE_MEM}
+name = "${FACTER_hostname}"
+vcpus = ${SIZE_CPU}
+vif = ['mac=${FACTER_macaddress}, bridge=${XENBR}']
+disk = [ 'phy:/dev/mapper/${VG}-${FACTER_hostname},xvda,w',
+        'phy:/dev/mapper/${VG}-${FACTER_hostname}sw,xvdb,w'
+]
+root = "/dev/xvda ro"
+extra = "clocksource=xen"
+__EOF__
+
+#xm create boot/gemini2 -c
+echo "INFO: $0 done"
+echo "INFO: xm create boot/${FACTER_hostname} -c"
+
+
similarity index 100%
rename from hador20-1.sh
rename to machines/public/hador20-1.sh
similarity index 100%
rename from hador20-2.sh
rename to machines/public/hador20-2.sh
similarity index 100%
rename from hador21-1.sh
rename to machines/public/hador21-1.sh
similarity index 100%
rename from hador21-2.sh
rename to machines/public/hador21-2.sh
similarity index 100%
rename from hador22-1.sh
rename to machines/public/hador22-1.sh
similarity index 100%
rename from hador22-2.sh
rename to machines/public/hador22-2.sh
similarity index 100%
rename from hador23-1.sh
rename to machines/public/hador23-1.sh
similarity index 100%
rename from hador23-2.sh
rename to machines/public/hador23-2.sh
similarity index 100%
rename from hador24-1.sh
rename to machines/public/hador24-1.sh
similarity index 100%
rename from hador24-2.sh
rename to machines/public/hador24-2.sh