From 0d39b8773ac6f03f859404479c83d9e7ba9b1674 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Franti=C5=A1ek=20Dvo=C5=99=C3=A1k?=
Date: Tue, 19 Jan 2016 17:56:48 +0100
Subject: [PATCH] First working version + cleanups.

---
 .gitignore | 2 +
 cfg_forkys.sh | 20 ----
 cfg_nat.sh | 20 ----
 config/perun-hador/process-hadoop_base.sh | 85 ++++++++++++++++
 config/perun-hador/process-hadoop_hdfs.sh | 60 ++++++++++++
 config/perun-nosec/process-hadoop_base.sh | 78 +++++++++++++++
 config/perun-nosec/process-hadoop_hdfs.sh | 53 ++++++++++
 config/puppet.conf.hador | 15 +++
 docker.sh | 32 +++---
 machines/Makefile | 11 +++
 machines/hador-gen-virt-frontend.sh | 26 +++++
 machines/hador-gen-virt-master.sh | 31 ++++++
 .../hador-gen-virt-node.sh | 9 +-
 machines/orig/cfg_hostname.sh | 17 ++++
 machines/orig/install_phase1.sh | 107 +++++++++++++++++++++
 hador20-1.sh => machines/public/hador20-1.sh | 0
 hador20-2.sh => machines/public/hador20-2.sh | 0
 hador21-1.sh => machines/public/hador21-1.sh | 0
 hador21-2.sh => machines/public/hador21-2.sh | 0
 hador22-1.sh => machines/public/hador22-1.sh | 0
 hador22-2.sh => machines/public/hador22-2.sh | 0
 hador23-1.sh => machines/public/hador23-1.sh | 0
 hador23-2.sh => machines/public/hador23-2.sh | 0
 hador24-1.sh => machines/public/hador24-1.sh | 0
 hador24-2.sh => machines/public/hador24-2.sh | 0
 25 files changed, 506 insertions(+), 60 deletions(-)
 create mode 100644 .gitignore
 delete mode 100644 cfg_forkys.sh
 delete mode 100644 cfg_nat.sh
 create mode 100755 config/perun-hador/process-hadoop_base.sh
 create mode 100755 config/perun-hador/process-hadoop_hdfs.sh
 create mode 100755 config/perun-nosec/process-hadoop_base.sh
 create mode 100755 config/perun-nosec/process-hadoop_hdfs.sh
 create mode 100644 config/puppet.conf.hador
 create mode 100644 machines/Makefile
 create mode 100755 machines/hador-gen-virt-frontend.sh
 create mode 100755 machines/hador-gen-virt-master.sh
 rename hador-gen-virt.sh => machines/hador-gen-virt-node.sh (75%)
 create mode 100644 machines/orig/cfg_hostname.sh
 create mode 100644 machines/orig/install_phase1.sh
 rename hador20-1.sh => machines/public/hador20-1.sh (100%)
 rename hador20-2.sh => machines/public/hador20-2.sh (100%)
 rename hador21-1.sh => machines/public/hador21-1.sh (100%)
 rename hador21-2.sh => machines/public/hador21-2.sh (100%)
 rename hador22-1.sh => machines/public/hador22-1.sh (100%)
 rename hador22-2.sh => machines/public/hador22-2.sh (100%)
 rename hador23-1.sh => machines/public/hador23-1.sh (100%)
 rename hador23-2.sh => machines/public/hador23-2.sh (100%)
 rename hador24-1.sh => machines/public/hador24-1.sh (100%)
 rename hador24-2.sh => machines/public/hador24-2.sh (100%)

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2fcdce6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+machines/*.sh
+!machines/*-gen-*.sh
diff --git a/cfg_forkys.sh b/cfg_forkys.sh
deleted file mode 100644
index 8cd48ec..0000000
--- a/cfg_forkys.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-export FACTER_fqdn="forkys.zcu.cz"
-export FACTER_hostname=`echo $FACTER_fqdn | sed 's/\..*//g'`
-export FACTER_ipaddress="147.228.1.147"
-export FACTER_netmask="255.255.255.0"
-
-export FACTER_ipaddress6="2001:718:1801:1001::1:147"
-export FACTER_netmask6="64"
-export FACTER_gw6="2001:718:1801:1001::1:1"
-
-export FACTER_gw="147.228.1.1"
-export FACTER_macaddress="52:54:00:12:20:12"
-
-export XENBR="br0"
-export SIZE_ROOTFS="15G"
-export SIZE_SWAP="512M"
-export SIZE_MEM="1024"
-export SIZE_CPU="2"
-
diff --git a/cfg_nat.sh b/cfg_nat.sh
deleted file mode 100644
index 3a91a1a..0000000
--- a/cfg_nat.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-export FACTER_fqdn="localmachine.vagrant"
-export FACTER_hostname=`echo $FACTER_fqdn | sed 's/\..*//g'`
-export FACTER_ipaddress="dhcp"
-#export FACTER_netmask="255.255.255.0"
-
-#export FACTER_ipaddress6="2001:718:1801:1001::1:147"
-#export FACTER_netmask6="64"
-#export FACTER_gw6="2001:718:1801:1001::1:1"
-
-#export FACTER_gw="147.228.1.1"
-export FACTER_macaddress="52:54:00:12:20:12"
-
-#export XENBR="br0"
-export SIZE_ROOTFS="15G"
-export SIZE_SWAP="512M"
-export SIZE_MEM="1024"
-export SIZE_CPU="4"
-
diff --git a/config/perun-hador/process-hadoop_base.sh b/config/perun-hador/process-hadoop_base.sh
new file mode 100755
index 0000000..4fc26ab
--- /dev/null
+++ b/config/perun-hador/process-hadoop_base.sh
@@ -0,0 +1,85 @@
+#! /bin/bash
+
+PROTOCOL_VERSION='3.0.0'
+
+function process() {
+	FROM_PERUN="${WORK_DIR}/hadoop_base"
+
+	KEYTAB='/etc/security/keytab/hbase.service.keytab'
+	KRB5CCNAME="FILE:${WORK_DIR}/krb5cc_perun_hbase"
+	PRINCIPAL="hbase/`hostname -f`@ICS.MUNI.CZ"
+
+	RESERVED='NAMESPACE\|default\|hbase\|hive\|oozie'
+
+	I_HBASE_CREATED=(0 'User ${login} will be created.')
+	I_HBASE_DELETED=(0 'User ${login} will be deleted.')
+	I_HBASE_OK=(0 'HBase shell OK.')
+	E_KINIT_FAILED=(1 'Kinit on HBase master failed')
+	E_HBASE_NAMESPACE_LIST=(2 'Cannot get list of namespaces')
+	E_HBASE_TABLE_LIST=(3 'Cannot get list of tables from ${login}')
+	E_HBASE_FAILED=(4 'HBase shell failed')
+	E_EMPTY_LIST=(5 'The list is empty!')
+	E_EMPTY_USERNAME=(6 'Empty username')
+
+	create_lock
+	chown hbase "${WORK_DIR}"
+	su hbase -s /bin/bash -p -c "kinit -k -t '${KEYTAB}' '${PRINCIPAL}'" || log_msg E_KINIT_FAILED
+
+	# get list from HBase
+	echo list_namespace | su hbase -s /bin/bash -p -c "hbase shell -n" >"${WORK_DIR}/out.txt" 2>/dev/null || log_msg E_HBASE_NAMESPACE_LIST
+	head -n -3 "${WORK_DIR}/out.txt" | sort > "${WORK_DIR}/hbase-list.txt"
+
+	# get list from Perun
+	[ -s "${FROM_PERUN}" ] || log_msg E_EMPTY_LIST
+	sort "${FROM_PERUN}" > "${WORK_DIR}/perun-list.txt"
+
+	# compare and action
+	rm -f /var/tmp/perun-hbase-delete.sh "${WORK_DIR}/add.hbase" "${WORK_DIR}/del.hbase"
+	diff "${WORK_DIR}/hbase-list.txt" "${WORK_DIR}/perun-list.txt" | while read op login; do
+		case "$op" in
+		'>')
+			# add user
+			[ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+
+			(echo "create_namespace '${login}'"
+			echo "grant '${login}', 'RWXCA', '@${login}'"
+			echo "grant '${login}', 'C'"
+			echo
+			) >> "${WORK_DIR}/add.hbase"
+			log_msg I_HBASE_CREATED
+			;;
+		'<')
+			# delete user (no real delete for now)
+			[ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+
+			if echo "${login}" | grep -q "${RESERVED}"; then
+				continue
+			fi
+
+			# a) delete tables
+			echo "list '${login}:.*'" | hbase shell -n 2>/dev/null > "${WORK_DIR}/out.txt" || log_msg E_HBASE_TABLE_LIST
+			cat "${WORK_DIR}/out.txt" | awk '/^$/ {o=1;next} /.*/ && o {print $0}' > "${WORK_DIR}/hbase-user-tables.txt"
+			while read table; do
+				echo "disable '${table}'"
+				echo "drop '${table}'"
+			done < "${WORK_DIR}/hbase-user-tables.txt" >> "${WORK_DIR}/del.hbase"
+			# b) delete user
+			(echo "revoke '${login}', '@${login}'"
+			echo "revoke '${login}', 'C'"
+			echo
+			) >> "${WORK_DIR}/del.hbase"
+			log_msg I_HBASE_DELETED
+			;;
+		esac
+	done
+
+	if [ -s "${WORK_DIR}/add.hbase" ]; then
+		cat "${WORK_DIR}/add.hbase" | su hbase -s /bin/bash -p -c "hbase shell -n" >/dev/null 2>&1 || log_msg E_HBASE_FAILED
+		log_msg I_HBASE_OK
+	fi
+	if [ -s "${WORK_DIR}/del.hbase" ]; then
+		mv "${WORK_DIR}/del.hbase" /var/tmp/perun-hbase-delete.sh
+	fi
+
+	kdestroy
+}
diff --git a/config/perun-hador/process-hadoop_hdfs.sh b/config/perun-hador/process-hadoop_hdfs.sh
new file mode 100755
index 0000000..b385cdd
--- /dev/null
+++ b/config/perun-hador/process-hadoop_hdfs.sh
@@ -0,0 +1,60 @@
+#! /bin/bash
+
+PROTOCOL_VERSION='3.0.0'
+
+function process() {
+	FROM_PERUN="${WORK_DIR}/hadoop_hdfs"
+
+	KEYTAB='/etc/security/keytab/nn.service.keytab'
+	KRB5CCNAME="FILE:${WORK_DIR}/krb5cc_perun_nn"
+	PRINCIPAL="nn/`hostname -f`@ICS.MUNI.CZ"
+
+	RESERVED='hbase\|hive\|oozie'
+
+	I_HDFS_CREATED=(0 'Directory /user/${login} created.')
+	I_HDFS_DELETED=(0 'Directory /user/${login} deleted.')
+	E_KINIT_FAILED=(1 'Kinit on NameNode failed')
+	E_HDFS_LIST_FAILED=(2 'Cannot get list of directories from HDFS')
+	E_HDFS_MKDIR_FAILED=(2 'Cannot create directory /user/${login}')
+	E_HDFS_PERMS_FAILED=(2 'Cannot set permissions on /user/${login}')
+	E_EMPTY_LIST=(3 'The list is empty!')
+	E_EMPTY_USERNAME=(4 'Empty username')
+
+	create_lock
+	chown hdfs "${WORK_DIR}"
+	su hdfs -s /bin/bash -p -c "kinit -k -t '${KEYTAB}' '${PRINCIPAL}'" || log_msg E_KINIT_FAILED
+
+	# get list from Hadoop HDFS
+	su hdfs -s /bin/bash -p -c "hdfs dfs -ls /user" >"${WORK_DIR}/hdfs-dirs.txt" || log_msg E_HDFS_LIST_FAILED
+	tail -n +2 "${WORK_DIR}/hdfs-dirs.txt" | sed 's,.* /user/,,' | sort > "${WORK_DIR}/hdfs-list.txt"
+
+	# get list from Perun
+	[ -s "${FROM_PERUN}" ] || log_msg E_EMPTY_LIST
+	sort "${FROM_PERUN}" > "${WORK_DIR}/perun-list.txt"
+
+	# compare and action
+	rm -f /var/tmp/perun-hdfs-delete.sh
+	diff "${WORK_DIR}/hdfs-list.txt" "${WORK_DIR}/perun-list.txt" | while read op login; do
+		case "$op" in
+		'>')
+			# add user
+			[ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+			su hdfs -s /bin/bash -p -c "hdfs dfs -mkdir '/user/${login}'" || log_msg E_HDFS_MKDIR_FAILED
+			su hdfs -s /bin/bash -p -c "hdfs dfs -chown '${login}:hadoop' '/user/${login}'" || log_msg E_HDFS_PERMS_FAILED
+			su hdfs -s /bin/bash -p -c "hdfs dfs -chmod 0750 '/user/${login}'" || log_msg E_HDFS_PERMS_FAILED
+			log_msg I_HDFS_CREATED
+			;;
+		'<')
+			# delete user (no real delete for now)
+			[ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+			if echo "${login}" | grep -q "${RESERVED}"; then
+				continue
+			fi
+			echo "su hdfs -s /bin/bash -p -c \"hdfs dfs -rm -r '/user/${login}'\"" >> /var/tmp/perun-hdfs-delete.sh
+			#log_msg I_HDFS_DELETED
+			;;
+		esac
+	done
+
+	kdestroy
+}
diff --git a/config/perun-nosec/process-hadoop_base.sh b/config/perun-nosec/process-hadoop_base.sh
new file mode 100755
index 0000000..abfdf3d
--- /dev/null
+++ b/config/perun-nosec/process-hadoop_base.sh
@@ -0,0 +1,78 @@
+#! /bin/bash
+
+PROTOCOL_VERSION='3.0.0'
+
+function process() {
+	FROM_PERUN="${WORK_DIR}/hadoop_base"
+
+	RESERVED='NAMESPACE\|default\|hbase\|hive\|oozie'
+
+	I_HBASE_CREATED=(0 'User ${login} will be created.')
+	I_HBASE_DELETED=(0 'User ${login} will be deleted.')
+	I_HBASE_OK=(0 'HBase shell OK.')
+	E_KINIT_FAILED=(1 'Kinit on HBase master failed')
+	E_HBASE_NAMESPACE_LIST=(2 'Cannot get list of namespaces')
+	E_HBASE_TABLE_LIST=(3 'Cannot get list of tables from ${login}')
+	E_HBASE_FAILED=(4 'HBase shell failed')
+	E_EMPTY_LIST=(5 'The list is empty!')
+	E_EMPTY_USERNAME=(6 'Empty username')
+
+	create_lock
+	chown hbase "${WORK_DIR}"
+
+	# get list from HBase
+	echo list_namespace | su hbase -s /bin/bash -p -c "hbase shell -n" >"${WORK_DIR}/out.txt" 2>/dev/null || log_msg E_HBASE_NAMESPACE_LIST
+	head -n -3 "${WORK_DIR}/out.txt" | sort > "${WORK_DIR}/hbase-list.txt"
+
+	# get list from Perun
+	[ -s "${FROM_PERUN}" ] || log_msg E_EMPTY_LIST
+	sort "${FROM_PERUN}" > "${WORK_DIR}/perun-list.txt"
+
+	# compare and action
+	rm -f /var/tmp/perun-hbase-delete.sh "${WORK_DIR}/add.hbase" "${WORK_DIR}/del.hbase"
+	diff "${WORK_DIR}/hbase-list.txt" "${WORK_DIR}/perun-list.txt" | while read op login; do
+		case "$op" in
+		'>')
+			# add user
+			[ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+
+			(echo "create_namespace '${login}'"
+			echo "grant '${login}', 'RWXCA', '@${login}'"
+			echo "grant '${login}', 'C'"
+			echo
+			) >> "${WORK_DIR}/add.hbase"
+			log_msg I_HBASE_CREATED
+			;;
+		'<')
+			# delete user (no real delete for now)
+			[ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+
+			if echo "${login}" | grep -q "${RESERVED}"; then
+				continue
+			fi
+
+			# a) delete tables
+			echo "list '${login}:.*'" | hbase shell -n 2>/dev/null > "${WORK_DIR}/out.txt" || log_msg E_HBASE_TABLE_LIST
+			cat "${WORK_DIR}/out.txt" | awk '/^$/ {o=1;next} /.*/ && o {print $0}' > "${WORK_DIR}/hbase-user-tables.txt"
+			while read table; do
+				echo "disable '${table}'"
+				echo "drop '${table}'"
+			done < "${WORK_DIR}/hbase-user-tables.txt" >> "${WORK_DIR}/del.hbase"
+			# b) delete user
+			(echo "revoke '${login}', '@${login}'"
+			echo "revoke '${login}', 'C'"
+			echo
+			) >> "${WORK_DIR}/del.hbase"
+			log_msg I_HBASE_DELETED
+			;;
+		esac
+	done
+
+	if [ -s "${WORK_DIR}/add.hbase" ]; then
+		cat "${WORK_DIR}/add.hbase" | su hbase -s /bin/bash -p -c "hbase shell -n" >/dev/null 2>&1 || log_msg E_HBASE_FAILED
+		log_msg I_HBASE_OK
+	fi
+	if [ -s "${WORK_DIR}/del.hbase" ]; then
+		mv "${WORK_DIR}/del.hbase" /var/tmp/perun-hbase-delete.sh
+	fi
+}
diff --git a/config/perun-nosec/process-hadoop_hdfs.sh b/config/perun-nosec/process-hadoop_hdfs.sh
new file mode 100755
index 0000000..33502e5
--- /dev/null
+++ b/config/perun-nosec/process-hadoop_hdfs.sh
@@ -0,0 +1,53 @@
+#! /bin/bash
+
+PROTOCOL_VERSION='3.0.0'
+
+function process() {
+	FROM_PERUN="${WORK_DIR}/hadoop_hdfs"
+
+	RESERVED='hbase\|hive\|oozie\|spark'
+
+	I_HDFS_CREATED=(0 'Directory /user/${login} created.')
+	I_HDFS_DELETED=(0 'Directory /user/${login} deleted.')
+	E_KINIT_FAILED=(1 'Kinit on NameNode failed')
+	E_HDFS_LIST_FAILED=(2 'Cannot get list of directories from HDFS')
+	E_HDFS_MKDIR_FAILED=(2 'Cannot create directory /user/${login}')
+	E_HDFS_PERMS_FAILED=(2 'Cannot set permissions on /user/${login}')
+	E_EMPTY_LIST=(3 'The list is empty!')
+	E_EMPTY_USERNAME=(4 'Empty username')
+
+	create_lock
+	chown hdfs "${WORK_DIR}"
+
+	# get list from Hadoop HDFS
+	su hdfs -s /bin/bash -p -c "hdfs dfs -ls /user" >"${WORK_DIR}/hdfs-dirs.txt" || log_msg E_HDFS_LIST_FAILED
+	tail -n +2 "${WORK_DIR}/hdfs-dirs.txt" | sed 's,.* /user/,,' | sort > "${WORK_DIR}/hdfs-list.txt"
+
+	# get list from Perun
+	[ -s "${FROM_PERUN}" ] || log_msg E_EMPTY_LIST
+	sort "${FROM_PERUN}" > "${WORK_DIR}/perun-list.txt"
+
+	# compare and action
+	rm -f /var/tmp/perun-hdfs-delete.sh
+	diff "${WORK_DIR}/hdfs-list.txt" "${WORK_DIR}/perun-list.txt" | while read op login; do
+		case "$op" in
+		'>')
+			# add user
+			[ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+			su hdfs -s /bin/bash -p -c "hdfs dfs -mkdir '/user/${login}'" || log_msg E_HDFS_MKDIR_FAILED
+			su hdfs -s /bin/bash -p -c "hdfs dfs -chown '${login}:hadoop' '/user/${login}'" || log_msg E_HDFS_PERMS_FAILED
+			su hdfs -s /bin/bash -p -c "hdfs dfs -chmod 0750 '/user/${login}'" || log_msg E_HDFS_PERMS_FAILED
+			log_msg I_HDFS_CREATED
+			;;
+		'<')
+			# delete user (no real delete for now)
+			[ -n "${login}" ] || log_msg E_EMPTY_USERNAME
+			if echo "${login}" | grep -q "${RESERVED}"; then
+				continue
+			fi
+			echo "su hdfs -s /bin/bash -p -c \"hdfs dfs -rm -r '/user/${login}'\"" >> /var/tmp/perun-hdfs-delete.sh
+			#log_msg I_HDFS_DELETED
+			;;
+		esac
+	done
+}
diff --git a/config/puppet.conf.hador b/config/puppet.conf.hador
new file mode 100644
index 0000000..b7e53be
--- /dev/null
+++ b/config/puppet.conf.hador
@@ -0,0 +1,15 @@
+[main]
+logdir=/opt/puppet3-omnibus/embedded/var/log/puppet
+vardir=/opt/puppet3-omnibus/embedded/var/lib/puppet
+ssldir=/opt/puppet3-omnibus/embedded/var/lib/puppet/ssl
+rundir=/opt/puppet3-omnibus/embedded/var/run/puppet
+factpath=$vardir/lib/facter
+templatedir=$confdir/templates
+server=myriad7.zcu.cz
+
+[master]
+# These are needed when the puppetmaster is run by passenger
+# and can safely be removed if webrick is used.
+ssl_client_header = SSL_CLIENT_S_DN
+ssl_client_verify_header = SSL_CLIENT_VERIFY
+
diff --git a/docker.sh b/docker.sh
index 8defd42..7017c88 100755
--- a/docker.sh
+++ b/docker.sh
@@ -19,15 +19,17 @@
 #
 # 3) /etc/default/docker
 #
-# DOCKER_OPTS="--cluster-advertise=br0:2376 --cluster-store=zk://hador-c1.ics.muni.cz,hador-c2.ics.muni.cz,hador.ics.muni.cz/docker-hador"
+# DOCKER_OPTS="--cluster-advertise=ib0:2376 --cluster-store=zk://hador-c1.ics.muni.cz,hador-c2.ics.muni.cz,hador.ics.muni.cz/docker-hador"
+#
+# DOCKER_OPTS="--bridge=br0 --cluster-advertise=ib0:2376 --cluster-store=zk://hador-c1.ics.muni.cz,hador-c2.ics.muni.cz,hador.ics.muni.cz/docker-hador --default-gateway=147.251.9.1 --fixed-cidr=147.251.9.220/31 --ip-masq=false --iptables=false --ipv6=false"
 #
 # 4) docker overlay network created
 #
 # docker network create -d overlay vxlan
 #
 
-#DOCKER_network='vxlan'
-DOCKER_network='bridge'
+DOCKER_network='vxlan'
+#DOCKER_network='bridge'
 
 
 # get the first free network device
@@ -73,13 +75,13 @@ if [ x"${1}" = x"init" ]; then
 		i=$((i+1))
 		mkdir /data/${i}${DISK_SUBDIR} 2>/dev/null || :
 	done
-	mkdir /scratch${DISK_SUBDIR} 2>/dev/null || :
+	mkdir /scratch${SCRATCH_SUBDIR} 2>/dev/null || :
 
 	if [ ! -f ~/.ssh/id_rsa_docker ]; then
 		ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa_docker
 		cp -p ~/.ssh/id_rsa_docker.pub ~/.ssh/authorized_keys_docker
-		if [ -f `dirname $0`/authorized_keys ]
-			cat `dirname $0`/authorized_keys >> ~/.ssh/authorized_keys_docker
+		if [ -f `dirname $0`/config/authorized_keys ]; then
+			cat `dirname $0`/config/authorized_keys >> ~/.ssh/authorized_keys_docker
 		fi
 	fi
 fi
@@ -88,14 +90,12 @@ for d in ${DISKS}; do
 	i=$((i+1))
 	ARGS="${ARGS} -v /data/${i}${DISK_SUBDIR}:/data/${i}"
 done
-ARGS="${ARGS} -v /scratch:/scratch"
+ARGS="${ARGS} -v /scratch${SCRATCH_SUBDIR}:/scratch"
 
 docker run -itd \
-	-v ~/.k5login:/root/.k5login \
 	-v ~/.ssh/authorized_keys_docker:/root/.ssh/authorized_keys \
-	-v /etc/krb5.conf:/etc/krb5.conf \
 	--net=${DOCKER_network} \
-	--restart=on-failure:5 \
+	--restart=on-failure:0 \
 	${ARGS} \
 	"$@" \
 	valtri/hadoop:puppetlabs \
@@ -109,7 +109,7 @@ fi
 # ==== public IPv6 ====
 dev=veth`devname`
 # this is not persistent, let's create a script
-cat << EOF > /var/run/docker/${FACTER_hostname}.sh
+cat << EOF > /etc/docker/net-${FACTER_hostname}.sh
 mkdir -p /var/run/netns || :
 find -L /etc/ssl/certs -type l -delete || :
 pid=\`docker inspect -f '{{.State.Pid}}' ${FACTER_hostname}\`
@@ -119,9 +119,9 @@ ip link add ${dev}a type veth peer name ${dev}b
 brctl addif ${XENBR} ${dev}a
 ip link set ${dev}a up
 ip link set ${dev}b netns \$pid
-ip netns exec \$pid ip link set dev ${dev}b name eth6
-ip netns exec \$pid ip link set eth6 address ${FACTER_macaddress}
-ip netns exec \$pid ip link set eth6 up
+ip netns exec \$pid ip link set dev ${dev}b name public6
+ip netns exec \$pid ip link set public6 address ${FACTER_macaddress}
+ip netns exec \$pid ip link set public6 up
 EOF
-chmod +x /var/run/docker/${FACTER_hostname}.sh
-sh -xe /var/run/docker/${FACTER_hostname}.sh
+chmod +x /etc/docker/net-${FACTER_hostname}.sh
+sh -xe /etc/docker/net-${FACTER_hostname}.sh
diff --git a/machines/Makefile b/machines/Makefile
new file mode 100644
index 0000000..b14e44a
--- /dev/null
+++ b/machines/Makefile
@@ -0,0 +1,11 @@
+all:
+	for j in 1 2; do \
+	  for i in $(shell seq 1 24); do \
+	    ./hador-gen-virt-node.sh $${i} $${j} > hador$${i}-$${j}.sh; \
+	  done; \
+	  ./hador-gen-virt-master.sh 1 $${j} > hador-c1-$${j}.sh; \
+	  ./hador-gen-virt-master.sh 2 $${j} > hador-c2-$${j}.sh; \
+	  ./hador-gen-virt-frontend.sh $${j} > hador-$${j}.sh; \
+	done
+
+.PHONY: all
diff --git a/machines/hador-gen-virt-frontend.sh b/machines/hador-gen-virt-frontend.sh
new file mode 100755
index 0000000..a1884d1
--- /dev/null
+++ b/machines/hador-gen-virt-frontend.sh
@@ -0,0 +1,26 @@
+#! /bin/sh
+
+NVIRT=$1
+
+NIP=0
+if [ -z "${NVIRT}" ]; then
+	NVIRT=1
+fi
+
+NIP_HEX=`printf "%02x\n" ${NIP}`
+NVIRT_HEX=`printf "%02x\n" ${NVIRT}`
+
+cat < []"
+	exit 1
+fi
+NIP=`expr ${N} + 192`
+if [ -z "${NVIRT}" ]; then
+	NVIRT=1
+fi
+
+NIP_HEX=`printf "%02x\n" ${NIP}`
+NVIRT_HEX=`printf "%02x\n" ${NVIRT}`
+
+cat < /mnt/target/etc/network/interfaces
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+	address $FACTER_ipaddress
+	netmask $FACTER_netmask
+	broadcast $FACTER_broadcast
+	gateway $FACTER_gw
+
+__EOF__
+
+cat << __EOF__ > /mnt/target/etc/fstab
+# /etc/fstab: static file system information.
+#
+# Use 'blkid' to print the universally unique identifier for a
+# device; this may be used with UUID= as a more robust way to name devices
+# that works even if disks are added and removed. See fstab(5).
+#
+#
+proc /proc proc defaults 0 0
+/dev/xvda / xfs defaults 0 1
+/dev/xvdb none swap sw 0 0
+__EOF__
+
+perl -pi -e 's#1:2345:respawn:/sbin/getty 38400 tty1#1:2345:respawn:/sbin/getty --noclear 38400 hvc0#' /mnt/target/etc/inittab
+HOTOVO=1
+while [ $HOTOVO -ne 0 ]; do
+	chroot . passwd
+	HOTOVO=$?
+done
+
+#exit
+#umount proc dev sys
+cd /mnt
+umount target
+
+cat << __EOF__ > /etc/xen/boot/${FACTER_hostname}
+#----------------------------------------------------------------------------
+# Standard variables
+kernel = "/boot/vmlinuz-3.2.0-4-amd64"
+ramdisk = "/boot/initrd.img-3.2.0-4-amd64"
+memory = ${SIZE_MEM}
+name = "${FACTER_hostname}"
+vcpus = ${SIZE_CPU}
+vif = ['mac=${FACTER_macaddress}, bridge=${XENBR}']
+disk = [ 'phy:/dev/mapper/${VG}-${FACTER_hostname},xvda,w',
+         'phy:/dev/mapper/${VG}-${FACTER_hostname}sw,xvdb,w'
+]
+root = "/dev/xvda ro"
+extra = "clocksource=xen"
+__EOF__
+
+#xm create boot/gemini2 -c
+echo "INFO: $0 done"
+echo "INFO: xm create boot/${FACTER_hostname} -c"
+
+
diff --git a/hador20-1.sh b/machines/public/hador20-1.sh
similarity index 100%
rename from hador20-1.sh
rename to machines/public/hador20-1.sh
diff --git a/hador20-2.sh b/machines/public/hador20-2.sh
similarity index 100%
rename from hador20-2.sh
rename to machines/public/hador20-2.sh
diff --git a/hador21-1.sh b/machines/public/hador21-1.sh
similarity index 100%
rename from hador21-1.sh
rename to machines/public/hador21-1.sh
diff --git a/hador21-2.sh b/machines/public/hador21-2.sh
similarity index 100%
rename from hador21-2.sh
rename to machines/public/hador21-2.sh
diff --git a/hador22-1.sh b/machines/public/hador22-1.sh
similarity index 100%
rename from hador22-1.sh
rename to machines/public/hador22-1.sh
diff --git a/hador22-2.sh b/machines/public/hador22-2.sh
similarity index 100%
rename from hador22-2.sh
rename to machines/public/hador22-2.sh
diff --git a/hador23-1.sh b/machines/public/hador23-1.sh
similarity index 100%
rename from hador23-1.sh
rename to machines/public/hador23-1.sh
diff --git a/hador23-2.sh b/machines/public/hador23-2.sh
similarity index 100%
rename from hador23-2.sh
rename to machines/public/hador23-2.sh
diff --git a/hador24-1.sh b/machines/public/hador24-1.sh
similarity index 100%
rename from hador24-1.sh
rename to machines/public/hador24-1.sh
diff --git a/hador24-2.sh b/machines/public/hador24-2.sh
similarity index 100%
rename from hador24-2.sh
rename to machines/public/hador24-2.sh
-- 
1.8.2.3