-- All measure rows that some statistic references as its latest run.
-- (m.* kept deliberately: the view mirrors every column of measure.)
CREATE VIEW view_measures AS
    SELECT m.*
    FROM measure m
    INNER JOIN statistic s
        ON s.last_id_measure = m.id_measure;
-- HDFS capacity/health snapshot joined to its measurement run:
-- disk usage plus under-replicated/corrupt/missing block counters.
CREATE VIEW view_hdfs AS
    SELECT
        m.seq,
        m.time,
        h.hostname,
        h.full,
        h.disk,
        h.disk_used,
        h.disk_free,
        h.block_under,
        h.block_corrupt,
        h.block_missing
    FROM hdfs h
    INNER JOIN measure m
        ON h.id_measure = m.id_measure;
-- Per-user HDFS space usage for each measurement run.
CREATE VIEW view_quota AS
    SELECT
        m.seq,
        m.time,
        q.user,
        q.used
    FROM quota q
    INNER JOIN measure m
        ON q.id_measure = m.id_measure;
-- Per-user job statistics (totals, failures, wait times) per measurement run.
CREATE VIEW view_jobstat AS
    SELECT
        m.seq,
        m.time,
        m.start,
        m.end,
        j.user,
        j.total,
        j.fails,
        j.wait_min,
        j.wait_avg,
        j.wait_max
    FROM jobstat j
    INNER JOIN measure m
        ON j.id_measure = m.id_measure;
--- /dev/null
#
# Parsing output of:
#
#   hdfs dfs -du -s '/user/*'
#
# Emits SQL INSERT statements consumed by the mysql client: one measure row,
# then one quota row per user directory.
#

# Quote a string for SQL, or return NULL when empty.
# Single quotes are doubled so a user name containing ' cannot break
# (or inject into) the generated statement.
function dbstr(s) {
    if (s) {
        gsub(/'/, "''", s)
        return "'" s "'"
    } else {
        return "NULL"
    }
}

# Numeric value for SQL, or NULL when negative/unset.
function dbi(i) {
    if (i >= 0) { return i }
    else { return "NULL" }
}

BEGIN {
    # split on whitespace AND "/" so the user name becomes its own field
    FS="[ \t/]+"
    print "INSERT INTO measure (name) VALUES ('quota');";
}

{
    # assumes du prints two numeric columns before the path, i.e.
    # "<size> <space-consumed> /user/<name>" -- TODO confirm for the
    # deployed Hadoop version (older releases print only one number)
    used=$1
    user=$4
    print "INSERT INTO quota (id_measure, user, used) VALUES (last_insert_id(), " dbstr(user) ", " dbi(used) ");"
}
$db_password = undef,
$email = undef,
$hdfs = undef,
+ $quota = undef,
$principal = undef,
) {
  # common
  # Shared directory holding the awk parsers installed below.
  file{'/usr/local/share/hadoop':
    ensure => 'directory',
    owner => 'root',
    group => 'root',
    mode => '0755',
  }
  # DB credentials/overrides sourced by the collector scripts; readable only
  # by the hdfs user the cron jobs run as.
  file{"${site_hadoop::defaultconfdir}/hadoop-accounting":
    owner => 'hdfs',
    group => 'hdfs',
    mode => '0400',
    content => template('site_hadoop/accounting/hadoop-accounting.erb'),
  }
  # Schema for the accounting database (applied out-of-band, not by Puppet).
  file{'/usr/local/share/hadoop/accounting.sql':
    owner => 'root',
    group => 'root',
    source => 'puppet:///modules/site_hadoop/accounting/create.sql',
  }
  # hdfs data
  file {'/usr/local/bin/accounting-hdfs':
    owner => 'root',
    group => 'root',
    mode => '0755',
    content => template('site_hadoop/accounting/hdfs.sh.erb'),
  }
  file {'/usr/local/share/hadoop/accounting-hdfs.awk':
    owner => 'root',
    group => 'root',
    mode => '0644',
    source => 'puppet:///modules/site_hadoop/accounting/hdfs.awk',
    require => File['/usr/local/share/hadoop'],
  }
  if $hdfs {
    # NOTE(review): the hdfs cron file is ensured 'absent' when $hdfs is SET
    # and left unmanaged otherwise -- the opposite of the $quota handling
    # below; confirm this inversion is intentional (transition cleanup?).
    file{'/etc/cron.d/accounting-hdfs':
      owner => 'root',
      ensure => 'absent',
    }
  }

  # user quota
  # Collector script executed from cron as the hdfs user.
  file {'/usr/local/bin/accounting-quota':
    owner => 'root',
    group => 'root',
    mode => '0755',
    content => template('site_hadoop/accounting/quota.sh.erb'),
  }
  # Parser turning 'hdfs dfs -du' output into SQL INSERTs.
  file {'/usr/local/share/hadoop/accounting-quota.awk':
    owner => 'root',
    group => 'root',
    mode => '0644',
    source => 'puppet:///modules/site_hadoop/accounting/quota.awk',
    require => File['/usr/local/share/hadoop'],
  }
  # $quota carries the cron time specification; unset removes the job.
  if $quota {
    file{'/etc/cron.d/accounting-quota':
      owner => 'root',
      group => 'root',
      mode => '0644',
      content => template('site_hadoop/accounting/cron-quota.erb'),
    }
  } else {
    file{'/etc/cron.d/accounting-quota':
      ensure => 'absent',
    }
  }
}
--- /dev/null
<%# /etc/cron.d/accounting-quota -- @quota supplies the five-field cron time
    specification; the job runs as the hdfs user. MAILTO is set only when an
    @email address was configured. -%>
<% if @email -%>
MAILTO='<%= @email -%>'

<% end -%>
<%= @quota -%> hdfs /usr/local/bin/accounting-quota
--- /dev/null
#! /bin/sh -e
#
# Collect per-user HDFS space usage (hdfs dfs -du -s '/user/*') and load it
# into the accounting database through the accounting-quota.awk parser.
# Defaults below may be overridden in ${DEFAULTDIR}/hadoop-accounting.

PREFIX='/usr/local'
DEFAULTDIR='<%= scope.lookupvar('site_hadoop::defaultconfdir') -%>'
export KRB5CCNAME='FILE:/tmp/krb5cc_hdfs_stat'
KEYTAB='FILE:/etc/security/keytab/nn.service.keytab'
PRINCIPAL="nn/`hostname -f`"
MYSQL_DB='accounting'
MYSQL_USER='accounting'
MYSQL_PASSWORD=''

if test -f ${DEFAULTDIR}/hadoop-accounting; then
	. ${DEFAULTDIR}/hadoop-accounting
fi

# Unpredictable temp file instead of a fixed /tmp name (avoids symlink
# attacks and stale leftovers); removed on any exit, including -e failures.
TMPFILE=`mktemp /tmp/accounting.quota.XXXXXX`
trap "rm -f '${TMPFILE}'" EXIT

# Obtain Kerberos credentials only when a principal is configured.
if test -n "${PRINCIPAL}"; then
	kinit -k -t ${KEYTAB} -l 5m ${PRINCIPAL}
fi

hdfs dfs -du -s '/user/*' > "${TMPFILE}"

if test -n "${PRINCIPAL}"; then
	kdestroy
fi

# NOTE(review): the password still appears on the mysql command line (visible
# in ps); consider a --defaults-extra-file instead.
awk -f ${PREFIX}/share/hadoop/accounting-quota.awk "${TMPFILE}" | mysql --user ${MYSQL_USER} --password=${MYSQL_PASSWORD} ${MYSQL_DB}