From 58fba3200aacc64bdc03d664a307ebb357122c98 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Franti=C5=A1ek=20Dvo=C5=99=C3=A1k?=
Date: Fri, 5 Feb 2016 12:38:48 +0100
Subject: [PATCH] Initial import.

---
 1-backup-conf.sh | 17 ++++++++++
 2-upgrade.sh     | 56 +++++++++++++++++++++++++++++++++
 HOWTO.sh         | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 167 insertions(+)
 create mode 100755 1-backup-conf.sh
 create mode 100755 2-upgrade.sh
 create mode 100644 HOWTO.sh

diff --git a/1-backup-conf.sh b/1-backup-conf.sh
new file mode 100755
index 0000000..6a8b457
--- /dev/null
+++ b/1-backup-conf.sh
@@ -0,0 +1,17 @@
+#! /bin/sh
+
+mkdir -p ~/backup-cdh547/default || :
+cd ~/backup-cdh547/
+
+for d in hadoop hbase hive zookeeper spark pig oozie impala sentry; do
+    if test -d /etc/${d}; then
+        cp -aL /etc/${d}/conf ${d}
+        for f in dist empty; do
+            if test -d /etc/${d}/conf.${f}; then
+                cp -aL /etc/${d}/conf.${f} ${d}.${f}
+            fi
+        done
+    fi
+    cp /etc/default/${d}* default/ 2>/dev/null || :
+done
+ls -la
diff --git a/2-upgrade.sh b/2-upgrade.sh
new file mode 100755
index 0000000..4fbae69
--- /dev/null
+++ b/2-upgrade.sh
@@ -0,0 +1,56 @@
+#! /bin/bash -xe
+
+alternative='cluster'
+oldver='5.4.7'
+
+service puppet stop || :
+
+## upgrade
+#echo "Upgrade? (CTRL-C to stop)"
+#read X
+
+test -d /hdfs && umount -f /hdfs || :
+# for HA: fail over to standby first
+#service hadoop-hdfs-zkfc stop
+#service hadoop-yarn-resourcemanager stop
+
+# new repo + download
+sed -e 's,/repos/hadoop/,/repos/hadoop-test/,' -i /etc/apt/sources.list.d/cloudera.list
+apt-get update
+apt-get dist-upgrade -y -d
+
+# stop services and move away the old configs
+hbmanager stop || :
+hivemanager stop || :
+service spark-history-server stop || :
+service spark-master stop || :
+yellowmanager stop || :
+service zookeeper-server stop || :
+ps xafuw | grep java || :
+for d in hadoop hbase hive zookeeper spark pig oozie impala sentry; do
+    if test -d /etc/${d}/conf.${alternative}; then
+        mv /etc/${d}/conf.${alternative} /etc/${d}/conf.cdh${oldver}
+        update-alternatives --auto ${d}-conf
+    fi
+done
+rm -fv ~hbase/.puppet-ssl-facl
+shs='/etc/init.d/spark-history-server'
+test -f ${shs} && mv -v ${shs} ${shs}.disabled || :
+
+# upgrade!
+apt-get dist-upgrade -y
+hbmanager stop || :
+hivemanager stop || :
+service spark-history-server stop || :
+service spark-master stop || :
+yellowmanager stop || :
+service zookeeper-server stop || :
+ps xafuw | grep java || :
+
+# replace with the new configs
+puppet agent --test
+#/opt/puppet3-omnibus/bin/puppet agent --test
+
+# for HA (during puppet):
+#service hadoop-hdfs-namenode stop
+#service hadoop-hdfs-namenode rollingUpgradeStarted
diff --git a/HOWTO.sh b/HOWTO.sh
new file mode 100644
index 0000000..010ad4c
--- /dev/null
+++ b/HOWTO.sh
@@ -0,0 +1,94 @@
+# see https://wiki.metacentrum.cz/metawiki/U%C5%BEivatel:Valtri/Hadoop/Installation#Upgrade
+
+# (0) mirror
+...
+
+# (0) backup metadata
+rsync -av --delete /data/ ~/backup/data/
+
+
+# (1) scripts
+# adjust paths to /opt!!!
+# adjust oldversion
+...
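+# e.g. one possible way to set the previous version in 2-upgrade.sh before
+# distributing it (the sed expression and the version are only an illustration):
+#sed -i -e "s/^oldver=.*/oldver='5.4.7'/" 2-upgrade.sh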
+#for m in -c1 -c2 '' `seq 1 24`; do scp -p *.sh root@hador${m}:~/; done
+#for m in 12 4 14 15 8; do scp -p *.sh root@myriad${m}:~/; done
+for m in 75 90 98 99 100 101 102; do scp -p *.sh root@took${m}:~/; done
+
+
+# (2) [all] backup
+~/1-backup-conf.sh
+
+
+# (3) [any NN] take a snapshot
+su hdfs -s /bin/bash
+export KRB5CCNAME=FILE:/tmp/krb5cc_hdfs_admin
+kinit -k -t /etc/security/keytab/nn.service.keytab nn/`hostname -f`@ICS.MUNI.CZ
+##non-HA:
+#hdfs dfsadmin -safemode enter
+hdfs dfsadmin -rollingUpgrade prepare
+while true; do hdfs dfsadmin -rollingUpgrade query; sleep 30; done
+
+
+# (4) [standby NN] upgrade NN2
+~/2-upgrade.sh
+service hadoop-hdfs-namenode stop
+service hadoop-hdfs-namenode rollingUpgradeStarted
+
+
+# (5) [active NN] upgrade NN1
+~/2-upgrade.sh
+service hadoop-hdfs-namenode stop
+service hadoop-hdfs-namenode rollingUpgradeStarted
+
+
+# (6) [DN,front,RM,...] upgrade datanodes and frontends (other controllers probably first; parallelize according to dfs.replication)
+~/2-upgrade.sh
+
+
+# (7) [any NN] finalize
+su hdfs -s /bin/bash
+export KRB5CCNAME=FILE:/tmp/krb5cc_hdfs_admin
+kinit -k -t /etc/security/keytab/nn.service.keytab nn/`hostname -f`@ICS.MUNI.CZ
+hdfs dfsadmin -rollingUpgrade finalize
+hdfs fsck /
+
+
+# (8) [Spark HS]
+# restore the startup script
+...
+
+
+# (9) [Hive Metastore, if needed]
+hivemanager stop
+#or:
+#schematool -dbType mysql -upgradeSchemaFrom 0.13.0
+mysqldump --opt metastore > metastore_cdh250.sql
+mysqldump --skip-add-drop-table --no-data metastore > my-schema-cdh250.mysql.sql
+cd /usr/lib/hive/scripts/metastore/upgrade/mysql
+mysql metastore
+ \. upgrade-0.13.0-to-0.14.0.mysql.sql
+ \. upgrade-0.14.0-to-1.1.0.mysql.sql
+hivemanager start
+
+
+# (10) [any NN - because of the credentials] Spark Jar
+su hdfs -s /bin/bash
+export KRB5CCNAME=FILE:/tmp/krb5cc_hdfs_admin
+hdfs dfs -ls /user/spark/share/lib/spark-assembly.jar
+hdfs dfs -rm /user/spark/share/lib/spark-assembly.jar && hdfs dfs -put /usr/lib/spark/lib/spark-assembly.jar /user/spark/share/lib/spark-assembly.jar
+hdfs dfs -ls /user/spark/share/lib/spark-assembly.jar
+
+
+# (11) [all]
+reboot
+
+
+# (12) [front] test everything (example jobs: Hadoop, HBase, Hive, Pig, Spark; service pages + node versions, logs, working log aggregation, ...; see the sketch below the patch)
+./hadoop-test.sh; echo $?
+
+
+# (13) update the LogBook and the wiki:
+#https://wiki.metacentrum.cz/metawiki/U%C5%BEivatel:Valtri/Hadoop/LogBook
+#https://wiki.metacentrum.cz/wiki/Hadoop#Instalovan.C3.BD_SW
+...
-- 
1.8.2.3
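# Note to step (12): hadoop-test.sh is not included in this patch. A minimal
# sketch of what such a post-upgrade smoke test could look like, assuming the
# usual CDH paths (all paths and job parameters are illustrative):

#! /bin/sh
set -e
# all DataNodes should be up and report the new version
hdfs dfsadmin -report | head -n 20
# the filesystem should still be healthy after finalizing the rolling upgrade
hdfs fsck / | tail -n 5
# run a trivial MapReduce job (examples jar at its usual CDH location)
yarn jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar pi 2 10
echo OK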