From 0be54d047e001ff1d12a97fa7165dc5ec5645dd2 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Franti=C5=A1ek=20Dvo=C5=99=C3=A1k?=
Date: Wed, 20 Jul 2016 23:01:12 +0200
Subject: [PATCH] Cleanups.

---
 1-backup-conf.sh | 14 ++++++---
 2-upgrade.sh     | 37 ++++++++++++----------
 HOWTO.sh         | 94 --------------------------------------------------------
 README.md        |  5 +++
 4 files changed, 36 insertions(+), 114 deletions(-)
 delete mode 100644 HOWTO.sh
 create mode 100644 README.md

diff --git a/1-backup-conf.sh b/1-backup-conf.sh
index 6a8b457..9cdc800 100755
--- a/1-backup-conf.sh
+++ b/1-backup-conf.sh
@@ -1,9 +1,15 @@
 #! /bin/sh
 
-mkdir -p ~/backup-cdh547/default || :
-cd ~/backup-cdh547/
+oldver="${1}"
+if test -z "${1}"; then
+  echo "Usage $0 OLD_VERSION"
+  exit 1
+fi
 
-for d in hadoop hbase hive zookeeper spark pig oozie impala sentry; do
+mkdir -p ~/backup-cdh${oldver}/default || :
+cd ~/backup-cdh${oldver}/
+
+for d in hadoop hbase hive hue zookeeper spark pig oozie impala sentry; do
   if test -d /etc/${d}; then
     cp -aL /etc/${d}/conf ${d}
     for f in dist empty; do
@@ -12,6 +18,6 @@ for d in hadoop hbase hive zookeeper spark pig oozie impala sentry; do
       fi
     done
   fi
-  cp /etc/default/${d}* default/ 2>/dev/null || :
+  cp -p /etc/default/${d}* default/ 2>/dev/null || :
 done
 ls -la
diff --git a/2-upgrade.sh b/2-upgrade.sh
index 4fbae69..4f3819f 100755
--- a/2-upgrade.sh
+++ b/2-upgrade.sh
@@ -1,7 +1,24 @@
 #! /bin/bash -xe
 
 alternative='cluster'
-oldver='5.4.7'
+oldver="${1}"
+if test -z "${1}"; then
+  echo "Usage $0 OLD_VERSION"
+  exit 1
+fi
+
+cdh_stop() {
+  hbmanager stop || :
+  hivemanager stop || :
+  impmanager stop || :
+  service hue stop || :
+  service oozie stop || :
+  service spark-history-server stop || :
+  service spark-master stop || :
+  yellowmanager stop || :
+  service zookeeper-server stop || :
+  ps xafuw | grep java || :
+}
 
 service puppet stop || :
 
@@ -20,14 +37,8 @@ apt-get update
 apt-get dist-upgrade -y -d
 
 # move away old configs
-hbmanager stop || :
-hivemanager stop || :
-service spark-history-server stop || :
-service spark-master stop || :
-yellowmanager stop || :
-service zookeeper-server stop || :
-ps xafuw | grep java || :
-for d in hadoop hbase hive zookeeper spark pig oozie impala sentry; do
+cdh_stop
+for d in hadoop hbase hive hue zookeeper spark pig oozie impala sentry; do
   if test -d /etc/${d}/conf.${alternative}; then
     mv /etc/${d}/conf.${alternative} /etc/${d}/conf.cdh${oldver}
     update-alternatives --auto ${d}-conf
@@ -39,13 +50,7 @@ test -f ${shs} && mv -v ${shs} ${shs}.fuck || :
 
 # upgrade!
 apt-get dist-upgrade -y
-hbmanager stop || :
-hivemanager stop || :
-service spark-history-server stop || :
-service spark-master-server stop || :
-yellowmanager stop || :
-service zookeeper-server stop || :
-ps xafuw | grep java || :
+cdh_stop
 
 # replace by the new configs
 puppet agent --test
diff --git a/HOWTO.sh b/HOWTO.sh
deleted file mode 100644
index 010ad4c..0000000
--- a/HOWTO.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-# see https://wiki.metacentrum.cz/metawiki/U%C5%BEivatel:Valtri/Hadoop/Installation#Upgrade
-
-# (0) mirror
-...
-
-# (0) backup metadata
-rsync -av --delete /data/ ~/backup/data/
-
-
-# (1) scripts
-# adjust to /opt!!!
-# adjust oldversion
-...
-#for m in -c1 -c2 '' `seq 1 24`; do scp -p *.sh root@hador${m}:~/; done
-#for m in 12 4 14 15 8; do scp -p *.sh root@myriad${m}:~/; done
-for m in 75 90 98 99 100 101 102; do scp -p *.sh root@took${m}:~/; done
-
-
-# (2) [all] backup
-~/1-backup-conf.sh
-
-
-# (3) [any NN] take a snapshot
-su hdfs -s /bin/bash
-export KRB5CCNAME=FILE:/tmp/krb5cc_hdfs_admin
-kinit -k -t /etc/security/keytab/nn.service.keytab nn/`hostname -f`@ICS.MUNI.CZ
-##non-HA
-#hdfs dfsadmin -safemode enter
-hdfs dfsadmin -rollingUpgrade prepare
-while true; do hdfs dfsadmin -rollingUpgrade query; sleep 30; done
-
-
-# (4) [standby NN] upgrade NN2
-~/2-upgrade.sh
-service hadoop-hdfs-namenode stop
-service hadoop-hdfs-namenode rollingUpgradeStarted
-
-
-# (5) [active NN] upgrade NN1
-~/2-upgrade.sh
-service hadoop-hdfs-namenode stop
-service hadoop-hdfs-namenode rollingUpgradeStarted
-
-
-# (6) [DN,front,RM,...] upgrade datanodes, frontends (other controllers probably first, parallelization according to dfs.replication)
-~/2-upgrade.sh
-
-
-# (7) [any NN] finalize
-su hdfs -s /bin/bash
-export KRB5CCNAME=FILE:/tmp/krb5cc_hdfs_admin
-kinit -k -t /etc/security/keytab/nn.service.keytab nn/`hostname -f`@ICS.MUNI.CZ
-hdfs dfsadmin -rollingUpgrade finalize
-hdfs fsck /
-
-
-# (8) [Spark HS]
-# restore the startup script
-...
-
-
-# (9) [Hive Metastore, if needed]
-hivemanager stop
-#or:
-#schematool -dbType mysql -upgradeSchemaFrom 0.13.0
-mysqldump --opt metastore > metastore_cdh250.sql
-mysqldump --skip-add-drop-table --no-data metastore > my-schema-cdh250.mysql.sql
-cd /usr/lib/hive/scripts/metastore/upgrade/mysql
-mysql metastore
-  \. upgrade-0.13.0-to-0.14.0.mysql.sql
-  \. upgrade-0.14.0-to-1.1.0.mysql.sql
-hivemanager start
-
-
-# (10) [any NN - because of credentials] Spark Jar
-su hdfs -s /bin/bash
-export KRB5CCNAME=FILE:/tmp/krb5cc_hdfs_admin
-hdfs dfs -ls /user/spark/share/lib/spark-assembly.jar
-hdfs dfs -rm /user/spark/share/lib/spark-assembly.jar && hdfs dfs -put /usr/lib/spark/lib/spark-assembly.jar /user/spark/share/lib/spark-assembly.jar
-hdfs dfs -ls /user/spark/share/lib/spark-assembly.jar
-
-
-# (11) [all]
-reboot
-
-
-# (12) [front] test everything (example jobs: Hadoop, HBase, Hive, Pig, Spark; service pages + node versions, logs, working log aggregation, ...)
-./hadoop-test.sh; echo $?
-
-
-# (13) update LogBook and wiki:
-#https://wiki.metacentrum.cz/metawiki/U%C5%BEivatel:Valtri/Hadoop/LogBook
-#https://wiki.metacentrum.cz/wiki/Hadoop#Instalovan.C3.BD_SW
-...
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..901fbe7
--- /dev/null
+++ b/README.md
@@ -0,0 +1,5 @@
+# Hadoop Upgrade Scripts
+
+## Usage
+
+See [MetaCentrum wiki Hadoop Admin page](https://wiki.metacentrum.cz/metawiki/U%C5%BEivatel:Valtri/Hadoop/Installation#Upgrade).
-- 
1.8.2.3
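
A minimal usage sketch for the parameterized scripts, assuming the cluster is being upgraded from CDH 5.4.7 (the value that 2-upgrade.sh previously hard-coded); substitute the release you are actually upgrading from:

# back up the current configs, then upgrade, passing the old CDH version as the first argument
~/1-backup-conf.sh 5.4.7
~/2-upgrade.sh 5.4.7
# called without an argument, each script now prints its usage line and exits with status 1
~/1-backup-conf.sh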