Sprout from connpool_branch 2006-10-18 08:33:06 UTC Zdeněk Šustr <sustr4@cesnet.cz> 'Removed unnecessary comments and debugging printouts VOLUME II.'
Delete:
org.glite.lb.logger/.cvsignore
org.glite.lb.logger/LICENSE
org.glite.lb.logger/Makefile
org.glite.lb.logger/build.xml
org.glite.lb.logger/config/glite-lb-logger.config.xml
org.glite.lb.logger/config/glite-lb-logger.default-properties
org.glite.lb.logger/config/startup
org.glite.lb.logger/project/build.number
org.glite.lb.logger/project/build.properties
org.glite.lb.logger/project/configure.properties.xml
org.glite.lb.logger/project/properties.xml
org.glite.lb.logger/project/tar_exclude
org.glite.lb.logger/project/version.properties
org.glite.lb.logger/src/event_queue.c
org.glite.lb.logger/src/event_store.c
org.glite.lb.logger/src/il_error.c
org.glite.lb.logger/src/il_error.h
org.glite.lb.logger/src/il_master.c
org.glite.lb.logger/src/input_queue_socket.c
org.glite.lb.logger/src/interlogd.c
org.glite.lb.logger/src/interlogd.h
org.glite.lb.logger/src/logd.c
org.glite.lb.logger/src/logd_proto.c
org.glite.lb.logger/src/logd_proto.h
org.glite.lb.logger/src/perftest_il.sh
org.glite.lb.logger/src/perftest_ll.sh
org.glite.lb.logger/src/queue_mgr.c
org.glite.lb.logger/src/queue_thread.c
org.glite.lb.logger/src/recover.c
org.glite.lb.logger/src/send_event.c
org.glite.lb.logger/src/server_msg.c
org.glite.lb.logger/test/IlTestBase.cpp
org.glite.lb.logger/test/IlTestBase.h
org.glite.lb.logger/test/event_queueTest.cpp
org.glite.lb.logger/test/event_storeTest.cpp
org.glite.lb.logger/test/il_test.cpp
org.glite.lb.logger/test/input_queue_socketTest.cpp
org.glite.lb.logger/test/ll_test.cpp
org.glite.lb.logger/test/logd_proto_test.c
org.glite.lb.logger/test/server_msgTest.cpp
org.glite.lb.proxy/.cvsignore
org.glite.lb.proxy/LICENSE
org.glite.lb.proxy/Makefile
org.glite.lb.proxy/build.xml
org.glite.lb.proxy/config/glite-lb-dbsetup-proxy.sql
org.glite.lb.proxy/config/startup
org.glite.lb.proxy/doc/README
org.glite.lb.proxy/doc/README.deploy
org.glite.lb.proxy/examples/test.sh
org.glite.lb.proxy/examples/test1.sh
org.glite.lb.proxy/examples/test2.sh
org.glite.lb.proxy/examples/test3.sh
org.glite.lb.proxy/project/build.number
org.glite.lb.proxy/project/build.properties
org.glite.lb.proxy/project/configure.properties.xml
org.glite.lb.proxy/project/properties.xml
org.glite.lb.proxy/project/tar_exclude
org.glite.lb.proxy/project/version.properties
org.glite.lb.proxy/src/fake_write2rgma.c
org.glite.lb.proxy/src/lbproxy.c
org.glite.lb.proxy/src/perftest_proxy.sh
org.glite.lb.utils/.cvsignore
org.glite.lb.utils/LICENSE
org.glite.lb.utils/Makefile
org.glite.lb.utils/build.xml
org.glite.lb.utils/doc/README.LB-monitoring
org.glite.lb.utils/doc/README.LB-statistics
org.glite.lb.utils/doc/glite-lb-mon-db.1
org.glite.lb.utils/doc/glite-lb-mon.1
org.glite.lb.utils/examples/glite-lb-index.conf
org.glite.lb.utils/project/build.number
org.glite.lb.utils/project/build.properties
org.glite.lb.utils/project/configure.properties.xml
org.glite.lb.utils/project/properties.xml
org.glite.lb.utils/project/tar_exclude
org.glite.lb.utils/project/version.properties
org.glite.lb.utils/src/mon-db.c
org.glite.lb.utils/src/mon.c
org.glite.lb.utils/src/statistics.c
org.glite.lb.ws-interface/.Makefile.swp
org.glite.lb.ws-interface/.cvsignore
org.glite.lb.ws-interface/LICENSE
org.glite.lb.ws-interface/Makefile
org.glite.lb.ws-interface/build.xml
org.glite.lb.ws-interface/project/build.number
org.glite.lb.ws-interface/project/build.properties
org.glite.lb.ws-interface/project/configure.properties.xml
org.glite.lb.ws-interface/project/properties.xml
org.glite.lb.ws-interface/project/tar_exclude
org.glite.lb.ws-interface/project/version.properties
org.glite.lb.ws-interface/src/LB.xml
org.glite.lb.ws-interface/src/LBTypes.xml.T
org.glite.lb.ws-interface/src/doc.xml
org.glite.lb.ws-interface/src/puke-ug.xsl
org.glite.lb.ws-interface/src/puke-wsdl.xsl
org.glite.lb/.cvsignore
org.glite.lb/LICENSE
org.glite.lb/build.xml
org.glite.lb/deployment/README
org.glite.lb/deployment/deploy_jp.diff
org.glite.lb/deployment/deploy_lb.diff
org.glite.lb/doc/README.lb4vdt
org.glite.lb/doc/copyright.tex
org.glite.lb/doc/frontmatter.tex
org.glite.lb/doc/testplan.tex
org.glite.lb/lb4vdt/LB_install.sh
org.glite.lb/lb4vdt/Makefile.inc
org.glite.lb/lb4vdt/patches/org.gridsite.core.patch
org.glite.lb/lb4vdt/scripts/org.gridsite.core.build
org.glite.lb/project/MultiStruct.pm
org.glite.lb/project/StructField.pm
org.glite.lb/project/at3
org.glite.lb/project/build.number
org.glite.lb/project/build.properties
org.glite.lb/project/dependencies.properties
org.glite.lb/project/events.T
org.glite.lb/project/glite.lb.csf.xml
org.glite.lb/project/properties.xml
org.glite.lb/project/run-workspace
org.glite.lb/project/status.T
org.glite.lb/project/taskdefs.xml
org.glite.lb/project/types.T
org.glite.lb/project/version.properties
+++ /dev/null
-.project
-.cdtproject
\ No newline at end of file
+++ /dev/null
-LICENSE file for EGEE Middleware\r
-================================\r
-\r
-Copyright (c) 2004 on behalf of the EU EGEE Project: \r
-The European Organization for Nuclear Research (CERN), \r
-Istituto Nazionale di Fisica Nucleare (INFN), Italy\r
-Datamat Spa, Italy\r
-Centre National de la Recherche Scientifique (CNRS), France\r
-CS Systeme d'Information (CSSI), France\r
-Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden\r
-Universiteit van Amsterdam (UvA), Netherlands\r
-University of Helsinki (UH.HIP), Finland\r
-University of Bergen (UiB), Norway\r
-Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom\r
-\r
-Redistribution and use in source and binary forms, with or without\r
-modification, are permitted provided that the following conditions are\r
-met: \r
-\r
-1. Redistributions of source code must retain the above copyright\r
-notice, this list of conditions and the following disclaimer.\r
-\r
-2. Redistributions in binary form must reproduce the above copyright\r
-notice, this list of conditions and the following disclaimer in the\r
-documentation and/or other materials provided with the distribution.\r
-\r
-3. The end-user documentation included with the redistribution, if\r
-any, must include the following acknowledgment: "This product includes\r
-software developed by The EU EGEE Project (http://cern.ch/eu-egee/)."\r
-Alternatively, this acknowledgment may appear in the software itself, if\r
-and wherever such third-party acknowledgments normally appear.\r
-\r
-4. The names EGEE and the EU EGEE Project must not be\r
-used to endorse or promote products derived from this software without\r
-prior written permission. For written permission, please contact\r
-<email address>.\r
-\r
-5. You are under no obligation whatsoever to provide anyone with any\r
-bug fixes, patches, or upgrades to the features, functionality or\r
-performance of the Software ("Enhancements") that you may develop over\r
-time; however, if you choose to provide your Enhancements to The EU\r
-EGEE Project, or if you choose to otherwise publish or distribute your\r
-Enhancements, in source code form without contemporaneously requiring\r
-end users of The EU EGEE Project to enter into a separate written license\r
-agreement for such Enhancements, then you hereby grant The EU EGEE Project\r
-a non-exclusive, royalty-free perpetual license to install, use, copy,\r
-modify, prepare derivative works, incorporate into the EGEE Middleware\r
-or any other computer software, distribute, and sublicense your\r
-Enhancements or derivative works thereof, in binary and source code\r
-form (if any), whether developed by The EU EGEE Project or third parties.\r
-\r
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED\r
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\r
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r
-DISCLAIMED. IN NO EVENT SHALL PROJECT OR ITS CONTRIBUTORS BE LIABLE\r
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\r
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\r
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\r
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\r
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\r
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\r
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-\r
-This software consists of voluntary contributions made by many\r
-individuals on behalf of the EU EGEE Project. For more information on The\r
-EU EGEE Project, please see http://cern.ch/eu-egee/. For more information on\r
-EGEE Middleware, please see http://egee-jra1.web.cern.ch/egee-jra1/\r
-\r
-\r
+++ /dev/null
-# defaults
-top_srcdir=.
-builddir=build
-top_builddir=${top_srcdir}/${builddir}
-stagedir=.
-distdir=.
-globalprefix=glite
-lbprefix=lb
-package=glite-lb-client
-version=0.0.0
-PREFIX=/opt/glite
-
-glite_location=/opt/glite
-globus_prefix=/opt/globus
-nothrflavour=gcc32
-thrflavour=gcc32pthr
-expat_prefix=/opt/expat
-
--include Makefile.inc
--include ../project/version.properties
-
-version=${module.version}
-
-CC=gcc
-
-VPATH:=${top_srcdir}/src:${top_srcdir}/test
-
-VERSION=-DVERSION=\"GLite-${version}\"
-
-SUFFIXES=.no
-
-GLOBUSINC=-I${globus_prefix}/include/${nothrflavour}
-
-GLOBUSTHRINC=-I${globus_prefix}/include/${thrflavour}
-
-ifdef LB_STANDALONE
- LB_STANDALONE_FLAGS:=-DLB_STANDALONE
-endif
-
-
-DEBUG:=-g -O0
-CFLAGS:=${DEBUG} \
- -I${stagedir}/include -I${top_srcdir}/src \
- -D_GNU_SOURCE \
- ${COVERAGE_FLAGS} \
- ${VERSION} ${LB_STANDALONE_FLAGS} ${LB_PERF_FLAGS}
-
-LDFLAGS:=-L${stagedir}/lib \
- ${COVERAGE_FLAGS}
-LINK:=libtool --mode=link ${CC} ${LDFLAGS}
-LINKXX:=libtool --mode=link ${CXX} -rpath ${stagedir}/lib ${LDFLAGS}
-INSTALL:=libtool --mode=install install
-
-GLOBUS_LIBS:= -L${globus_prefix}/lib \
- -lglobus_common_${nothrflavour} \
- -lglobus_gssapi_gsi_${nothrflavour}
-
-GLOBUS_THRLIBS:= -L${globus_prefix}/lib \
- -lglobus_common_${thrflavour} \
- -lglobus_gssapi_gsi_${thrflavour}
-
-ifneq (${expat_prefix},/usr)
- EXPAT_LIBS:=-L${expat_prefix}/lib
-endif
-EXPAT_LIBS:=${EXPAT_LIBS} -lexpat
-
-EXT_LIBS:= ${EXPAT_LIBS}
-
-COMMON_LIB:=-lglite_lb_common
-
-GLITE_GSS_LIB:=-lglite_security_gss
-
-TEST_LIBS:=-L${cppunit_prefix}/lib -lcppunit
-TEST_INC:=-I${cppunit_prefix}/include
-
-ifdef LB_PERF
- LOGD:=glite-lb-logd-perf glite-lb-logd-perf-nofile
- INTERLOGD:=glite-lb-interlogd-perf \
- glite-lb-interlogd-perf-empty
-# glite-lb-interlogd-perf-inline-empty
- NOTIF_INTERLOGD:=glite-lb-notif-interlogd
-else
- LOGD:=glite-lb-logd
- INTERLOGD:=glite-lb-interlogd
- NOTIF_INTERLOGD:=glite-lb-notif-interlogd
-endif
-
-
-LOGD_OBJS:= logd_proto.o logd.o
-
-LOGD_NOBJS:=${LOGD_OBJS:.o=.no}
-
-INTERLOG_OBJS:=il_error.o input_queue_socket.o \
- recover.o send_event.o \
- event_queue.o event_store.o il_master.o interlogd.o \
- queue_mgr.o server_msg.o queue_thread.o
-
-INTERLOG_NOBJS:=${INTERLOG_OBJS:.o=.no}
-INTERLOG_PERF_OBJS:=${INTERLOG_OBJS:.o=.po}
-INTERLOG_EMPTY_OBJS:=${INTERLOG_OBJS:.o=.eo}
-#INTERLOG_INLINE_EMPTY_OBJS:=${INTERLOG_OBJS:.o=.io}
-
-INTERLOG_TEST_OBJS:= \
- il_error.o \
- server_msg.o \
- server_msgTest.o \
- queue_thread.o \
- event_store.o \
- event_storeTest.o \
- queue_mgr.o \
- il_master.o \
- input_queue_socket.o \
- input_queue_socketTest.o \
- send_event.o \
- event_queue.o \
- event_queueTest.o \
- IlTestBase.o \
- il_test.o
-
-default: all
-
-all compile: $(LOGD) $(INTERLOGD) $(NOTIF_INTERLOGD)
-
-glite-lb-logd: ${LOGD_OBJS}
- ${LINK} -o $@ ${LOGD_OBJS} ${COMMON_LIB}_${nothrflavour}
-
-glite-lb-logd-perf: ${LOGD_OBJS}
- ${LINK} -o $@ ${LOGD_OBJS} ${COMMON_LIB}_${nothrflavour}
-
-glite-lb-logd-nofile: ${LOGD_NOBJS}
- ${LINK} -o $@ ${LOGD_NOBJS} ${COMMON_LIB}_${nothrflavour}
-
-glite-lb-logd-perf-nofile: ${LOGD_NOBJS}
- ${LINK} -o $@ ${LOGD_NOBJS} ${COMMON_LIB}_${nothrflavour}
-
-glite-lb-interlogd: ${INTERLOG_OBJS}
- ${LINK} -o $@ ${INTERLOG_OBJS} ${COMMON_LIB}_${thrflavour}
-
-glite-lb-notif-interlogd: ${INTERLOG_NOBJS}
- ${LINK} -o $@ ${INTERLOG_NOBJS} ${COMMON_LIB}_${thrflavour}
-
-glite-lb-interlogd-perf: ${INTERLOG_OBJS}
- ${LINK} -o $@ ${INTERLOG_OBJS} ${COMMON_LIB}_${thrflavour}
-
-glite-lb-interlogd-perf-empty: ${INTERLOG_EMPTY_OBJS}
- ${LINK} -o $@ ${INTERLOG_EMPTY_OBJS} ${COMMON_LIB}_${thrflavour}
-
-#glite-lb-interlogd-perf-inline-empty: ${INTERLOG_INLINE_EMPTY_OBJS}
-# ${LINK} -o $@ ${INTERLOG_INLINE_EMPTY_OBJS} \
-# ${COMMON_LIB}_${thrflavour}
-
-stage: compile
- $(MAKE) install PREFIX=${stagedir} DOSTAGE=yes
-
-check:
-# do nothing until test/ is really added to CVS
-# check.ll check.il
-
-#check.ll: logd_proto_test.o ll_test.o
-# ${LINKXX} -o $@ ${COMMON_LIB}_${nothrflavour} ${EXT_LIBS} ${GLOBUS_LIBS} ${TEST_LIBS} $+
-# ./check.ll
-
-check.ll:
- -echo commented out -- fix needed
-
-check.il: ${INTERLOG_TEST_OBJS}
- ${LINKXX} -o $@ ${COMMON_LIB}_${thrflavour} ${GLITE_GSS_LIB}_${nothrflavour} ${TEST_LIBS} -lpthread $+
-
-dist: distsrc distbin
-
-distsrc:
- mkdir -p ${top_srcdir}/${package}-${version}
- cd ${top_srcdir} && GLOBIGNORE="${package}-${version}" && cp -Rf * ${package}-${version}
- cd ${top_srcdir} && tar -czf ${distdir}/${package}-${version}_src.tar.gz --exclude-from=project/tar_exclude ${package}-${version}
- rm -rf ${top_srcdir}/${package}-${version}
-
-distbin:
- $(MAKE) install PREFIX=`pwd`/tmpbuilddir${stagedir}
- save_dir=`pwd`; cd tmpbuilddir${stagedir} && tar -czf $$save_dir/${top_srcdir}/${distdir}/${package}-${version}_bin.tar.gz *; cd $$save_dir
- rm -rf tmpbuilddir
-
-install:
- -mkdir -p ${PREFIX}/bin
- -mkdir -p ${PREFIX}/etc/init.d
- -mkdir -p ${PREFIX}/share/doc/${package}-${version}
- ${INSTALL} -m 755 ${LOGD} ${PREFIX}/bin
- ${INSTALL} -m 755 ${INTERLOGD} ${PREFIX}/bin
- if [ x${DOSTAGE} = xyes ]; then \
- ${INSTALL} -m 755 ${NOTIF_INTERLOGD} ${PREFIX}/bin; \
- fi
- ${INSTALL} -m 755 ${top_srcdir}/config/startup ${PREFIX}/etc/init.d/glite-lb-locallogger
- ${INSTALL} -m 644 ${top_srcdir}/LICENSE ${PREFIX}/share/doc/${package}-${version}
-
-${INTERLOG_NOBJS}: %.no: %.c
- ${CC} ${CFLAGS} ${GLOBUSTHRINC} -DIL_NOTIFICATIONS -c $< -o $@
-
-${INTERLOG_OBJS}: %.o: %.c
- ${CC} ${CFLAGS} ${GLOBUSTHRINC} -c $< -o $@
-
-${INTERLOG_EMPTY_OBJS}: %.eo: %.c
- ${CC} ${CFLAGS} ${GLOBUSTHRINC} -DLB_PERF -DPERF_EMPTY -c $< -o $@
-
-${INTERLOG_PERF_OBJS}: %.po: %.c
- ${CC} ${CFLAGS} ${GLOBUSTHRINC} -DLB_PERF -c $< -o $@
-
-#${INTERLOG_INLINE_EMPTY_OBJS}: %.io: %.c
-# ${CC} ${CFLAGS} ${GLOBUSTHRINC} -DLB_PERF -DPERF_EMPTY -DPERF_EVENTS_INLINE -c $< -o $@
-
-${LOGD_NOBJS}: %.no: %.c
- ${CC} ${CFLAGS} ${GLOBUSINC} -DLB_PERF -DLOGD_NOFILE -c $< -o $@
-
-${LOGD_OBJS}: %.o: %.c
- ${CC} ${CFLAGS} ${GLOBUSINC} -c $< -o $@
-
-logd_proto_test.o: %.o: %.c
- ${CC} ${CFLAGS} ${GLOBUSINC} -c $< -o $@
-
-ll_test.o: %.o: %.cpp
- ${CXX} ${CFLAGS} ${TEST_INC} -c $< -o $@
-
-il_test.o IlTestBase.o server_msgTest.o event_queueTest.o input_queue_socketTest.o event_storeTest.o: %.o: %.cpp
- ${CXX} ${CFLAGS} ${GLOBUSTHRINC} ${TEST_INC} -c $< -o $@
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Build file for the GLite LB Client module
-
- Authors: Ales Krenek <ljocha@ics.muni.cz>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
- Revision 1.5 2005/05/26 15:13:49 zurek
- inserted module.build.file
-
- Revision 1.4.2.1 2005/02/12 01:39:10 glbuild
- Changed start time
-
- Revision 1.4 2004/10/18 19:16:09 zsalvet
- RPM descriptions
-
- Revision 1.3 2004/07/29 23:21:51 dimeglio
- Changed default target from compile to dist
-
- Revision 1.2 2004/07/07 09:24:57 akrenek
- thr/nonthr flavours used correctly
-
- Revision 1.3 2004/07/06 17:45:30 flammer
- Update of classpath definitions, targets & configure file.
-
- Revision 1.2 2004/06/23 00:29:33 dimeglio
- Added standard comments and handling of support files
-
--->
-
-<project name="logger" default="dist">
-
- <!-- =========================================
- Builds the GLite LB Client Module
- ========================================= -->
-
- <!-- =========================================
- Import properties (order is important)
- ========================================= -->
-
- <!-- import baseline & user properties -->
- <import file="../org.glite/project/baseline.properties.xml" />
-
- <!-- import component build properties,
- component properties &
- component common properties -->
- <import file="./project/properties.xml"/>
-
- <!-- import subsystem build properties,
- subsystem properties &
- subsystem common properties -->
- <import file="${subsystem.properties.file}"/>
-
- <!-- import global build properties &
- global properties -->
- <import file="${global.properties.file}" />
-
- <!-- =========================================
- Load dependency property files (order is important)
- ========================================= -->
- <property file="${user.dependencies.file}"/>
- <property file="${component.dependencies.file}" />
- <property file="${subsystem.dependencies.file}" />
- <property file="${global.dependencies.file}"/>
-
- <!-- =========================================
- Load configure options (order is important)
- ========================================= -->
- <import file="${global.configure.options.file}"/>
- <import file="${component.configure.options.file}"/>
-
- <!-- =========================================
- Import task definitions (order is important)
- ========================================= -->
- <import file="${subsystem.taskdefs.file}" />
- <import file="${global.taskdefs.file}" />
-
- <!-- =========================================
- Load common targets
- ========================================= -->
- <import file="${global.targets-simple_make.file}" />
-
- <!-- =========================================
- Load version file
- ========================================= -->
- <property file="${module.version.file}"/>
- <property file="${module.build.file}"/>
-
- <!-- ==============================================
- Local private targets
- ============================================== -->
-
- <target name="localinit"
- description="Module specific initialization tasks">
- <antcall target="lbmakefiles" />
- </target>
-
- <target name="localcompile"
- description="Module specific compile tasks">
- </target>
-
- <target name="localclean"
- description="Module specific cleaning tasks">
- </target>
-
- <!-- =========================================
- RPM settings
- ========================================= -->
-
- <property name="build.package.summary" value="L&B local logger" />
- <property name="build.package.description" value="
-Daemons installed on any EGEE machine producing Logging & Bookkeeping
-(L&B) events. They are responsible for non-blocking accept of
-an event, persistent storage, and reliable transfer to bookkeeping server." />
-
-</project>
+++ /dev/null
-<service name="glite-lb-logger">
- <components>
-
- <component name="locallogger">
- <config-template>
- <description>glite_lb_logd daemon</description>
- <init>
- <param name="port" mandatory="false" type="int" advanced="false">
- <description>port to listen</description>
- </param>
- <param name="file-prefix" mandatory="false" type="string" advanced="false">
- <description>path and prefix for event files</description>
- </param>
- <param name="socket" mandatory="false" type="string" advanced="false">
- <description>path to local socket</description>
- </param>
- <param name="cert" mandatory="false" type="string" advanced="false">
- <description>location of server certificate</description>
- </param>
- <param name="key" mandatory="false" type="string" advanced="false">
- <description>location of server private key</description>
- </param>
- <param name="CAdir" mandatory="false" type="string" advanced="false">
- <description>"directory containing CA certificates"</description>
- </param>
- </init>
- </config-template>
- </component>
-
- <component name="interlogger">
- <config-template>
- <description>glite_lb_interlogd daemon</description>
- <init>
- <param name="file-prefix" mandatory="false" type="string" advanced="false">
- <description>path and prefix for event files</description>
- </param>
- <param name="socket" mandatory="false" type="string" advanced="false">
- <description>path to local socket</description>
- </param>
- <param name="cert" mandatory="false" type="string" advanced="false">
- <description>location of server certificate</description>
- </param>
- <param name="key" mandatory="false" type="string" advanced="false">
- <description>location of server private key</description>
- </param>
- <param name="CAdir" mandatory="false" type="string" advanced="false">
- <description>"directory containing CA certificates"</description>
- </param>
- </init>
- </config-template>
- </component>
-
- <component name="notification-interlogger">
- <config-template>
- <description>glite_lb_notif_interlogd daemon</description>
- <init>
- <param name="file-prefix" mandatory="false" type="string" advanced="false">
- <description>path and prefix for event files</description>
- </param>
- <param name="socket" mandatory="false" type="string" advanced="false">
- <description>path to local socket</description>
- </param>
- <param name="cert" mandatory="false" type="string" advanced="false">
- <description>location of server certificate</description>
- </param>
- <param name="key" mandatory="false" type="string" advanced="false">
- <description>location of server private key</description>
- </param>
- <param name="CAdir" mandatory="false" type="string" advanced="false">
- <description>"directory containing CA certificates"</description>
- </param>
- </init>
- </config-template>
- </component>
-
- </components>
-</service>
+++ /dev/null
-locallogger.port = 9002
-locallogger.file-prefix = /tmp/dglogd.log
-locallogger.socket = /tmp/interlogger.sock
-locallogger.cert = /etc/grid-security/hostcert.pem
-locallogger.key = /etc/grid-security/hostkey.pem
-locallogger.CAdir = /etc/grid-security/certificates
-interlogger.file-prefix = /tmp/dglogd.log
-interlogger.socket = /tmp/interlogger.sock
-interlogger.cert = /etc/grid-security/hostcert.pem
-interlogger.key = /etc/grid-security/hostkey.pem
-interlogger.CAdir = /etc/grid-security/certificates
-notification-interlogger.file-prefix = /tmp/notif_events
-notification-interlogger.socket = /tmp/notif_interlogger.sock
-notification-interlogger.cert = /etc/grid-security/hostcert.pem
-notification-interlogger.key = /etc/grid-security/hostkey.pem
-notification-interlogger.CAdir = /etc/grid-security/certificates
-log.Priority = DEBUG
-log.fileName = /var/glite/log/glite-lb-logger.log
+++ /dev/null
-#!/bin/sh
-
-GLITE_LOCATION=${GLITE_LOCATION:-/opt/glite}
-GLITE_LOCATION_VAR=${GLITE_LOCATION_VAR:-$GLITE_LOCATION/var}
-
-[ -f /etc/glite.conf ] && . /etc/glite.conf
-[ -f $GLITE_LOCATION/etc/glite-wms.conf ] && . $GLITE_LOCATION/etc/glite-wms.conf
-
-[ -f $GLITE_LOCATION/etc/lb.conf ] && . $GLITE_LOCATION/etc/lb.conf
-[ -f $GLITE_LOCATION_VAR/etc/lb.conf ] && . $GLITE_LOCATION_VAR/etc/lb.conf
-
-[ -f $HOME/.glite.conf ] && . $HOME/.glite.conf
-
-unset creds port
-
-start()
-{
- if test -z "$GLITE_USER" ;then
- echo 'Error: GLITE_USER is not set'
- echo FAILED
- return 1
- fi
-
- [ -n "$GLITE_HOST_CERT" -a -n "$GLITE_HOST_KEY" ] &&
- creds="-c $GLITE_HOST_CERT -k $GLITE_HOST_KEY"
-
- if test -z "$creds"; then
- if su - $GLITE_USER -c "test -r /etc/grid-security/hostkey.pem -a -r /etc/grid-security/hostcert.pem"; then
- echo "$0: WARNING: /etc/grid-security/hostkey.pem readable by $GLITE_USER"
- creds="-c /etc/grid-security/hostcert.pem -k /etc/grid-security/hostkey.pem"
- fi
- fi
-
- [ -z "$creds" ] && echo $0: WARNING: No credentials specified. Using default lookup which is dangerous. >&2
-
- [ -n "$GLITE_LB_LOGGER_PORT" ] && port="-p $GLITE_LB_LOGGER_PORT"
-
- echo -n Starting glite-lb-logd ...
- (cd /tmp && ls -f /tmp |fgrep ^dglogd_sock_ |xargs rm -f)
- su - $GLITE_USER -c "$GLITE_LOCATION/bin/glite-lb-logd \
- $creds $port" && echo " done" || echo " FAILED"
-
- echo -n Starting glite-lb-interlogd ...
- su - $GLITE_USER -c "$GLITE_LOCATION/bin/glite-lb-interlogd \
- $creds" && echo " done" || echo " FAILED"
-}
-
-stop()
-{
- echo -n Stopping glite-lb-logd ...
- killall glite-lb-logd
- echo " done"
- echo -n Stopping glite-lb-interlogd ...
- killall glite-lb-interlogd
- echo " done"
-}
-
-status()
-{
- if netstat -an --inet | grep "^tcp .* 0.0.0.0:${GLITE_LB_LOGGER_PORT:-9002} .*LISTEN" >/dev/null 2>&1 ;then
- echo glite-lb-logd running
- else
- echo glite-lb-logd not running
- return 1
- fi
- if netstat -an --unix | grep "^unix .* LISTEN.* /tmp/interlogger.sock$" >/dev/null 2>&1 ;then
- echo glite-lb-interlogd running
- else
- echo glite-lb-interlogd not running
- return 1
- fi
-}
-
-case x$1 in
- xstart) start;;
- xstop) stop;;
- xrestart) stop; start;;
- xstatus) status;;
- x*) echo usage: $0 start,stop,restart,status >&2
- exit 1;;
-esac
+++ /dev/null
-#Mon Apr 03 07:45:18 CEST 2006
-module.build=0242
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Configuration options for the GLite LB Client module
-
- Authors: Ales Krenek <ljocha@ics.muni.cz>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
- Revision 1.3 2006/03/15 18:25:31 akrenek
- cares
-
- Revision 1.2 2004/08/31 13:44:44 mvocu
- * added cppunit
-
- Revision 1.1 2004/07/07 12:08:47 akrenek
- *** empty log message ***
-
- Revision 1.2 2004/07/06 20:47:11 flammer
- Moved to configure.properties.xml
-
-
--->
-
- <!-- ======================================================
- Define extra properties here ...
- ====================================================== -->
-
- <project name="LB Client configuration options">
- <target name="lbmakefiles">
- <exec executable="ln" failonerror="true">
- <arg line="-fs ${component.dir}/Makefile ${module.build.dir}/Makefile"/>
- </exec>
- <echo file="${module.build.dir}/Makefile.inc">
-top_srcdir=..
-builddir=build
-stagedir=${stage.abs.dir}
-distdir=${dist.dir}
-globalprefix=${global.prefix}
-lbprefix=${subsystem.prefix}
-package=${module.package.name}
-PREFIX=${install.dir}
-version=${module.version}
-glite_location=${with.glite.location}
-globus_prefix=${with.globus.prefix}
-expat_prefix=${with.expat.prefix}
-cppunit_prefix=${with.cppunit.prefix}
-thrflavour=${with.globus.thr.flavor}
-nothrflavour=${with.globus.nothr.flavor}
- </echo>
- </target>
- </project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Common build properties file for the Glite LB Logger component
-
- Authors: Ales Krenek <ljocha@ics.muni.cz>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
--->
-
-<project name="LB Logger component common properties">
-
- <!-- Include build properties to allow overwriting
- of properties for subsystem -->
- <property file="project/build.properties" />
-
- <!-- ======================================================
- Define corresponding subsystem properties
- ====================================================== -->
-
- <!-- Subsystem name -->
- <property name="subsystem.name" value="${lb.subsystem.name}"/>
-
- <!-- Subsystem prefix -->
- <property name="subsystem.prefix" value="${lb.subsystem.prefix}"/>
-
- <!-- ======================================================
- Define component properties
- ====================================================== -->
-
- <!-- Component name prefix -->
- <property name="component.prefix" value="logger" />
-
- <!-- ======================================================
- Define general component properties
- ====================================================== -->
-
- <import file="${component.general.properties.file}" />
-
- <!-- ======================================================
- Define extra properties here ...
- ====================================================== -->
-
-
-</project>
+++ /dev/null
-tar_exclude
-CVS
-build.xml
-build
-build.properties
-properties.xml
-configure.properties.xml
-.cvsignore
-.project
-.cdtproject
+++ /dev/null
-#Fri Sep 02 14:18:17 CEST 2005
-module.version=1.3.0
-module.age=0
+++ /dev/null
-#ident "$Header$"
-
-/*
- * - general queue handling routines (insert, get)
- */
-
-#include <netdb.h>
-#include <sys/socket.h>
-#include <assert.h>
-#include <errno.h>
-#include <string.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "glite/wmsutils/jobid/cjobid.h"
-
-#include "interlogd.h"
-
-struct event_queue_msg {
- struct server_msg *msg;
- struct event_queue_msg *prev;
-};
-
-struct event_queue *
-event_queue_create(char *server_name)
-{
- struct event_queue *eq;
- char *p;
-
- p = strchr(server_name, ':');
-
- if(p)
- *p++ = 0;
-
- if((eq = malloc(sizeof(*eq))) == NULL) {
- set_error(IL_NOMEM, ENOMEM, "event_queue_create: error allocating event queue");
- return(NULL);
- }
-
- memset(eq, 0, sizeof(*eq));
-
- eq->dest_name = strdup(server_name);
-
- if(p)
- *(p-1) = ':';
-
-#if defined(IL_NOTIFICATIONS)
- eq->dest_port = atoi(p);
-#else
- eq->dest_port = p ? atoi(p)+1 : GLITE_WMSC_JOBID_DEFAULT_PORT+1;
-#endif
- /* create all necessary locks */
- if(pthread_rwlock_init(&eq->update_lock, NULL)) {
- set_error(IL_SYS, errno, "event_queue_create: error creating update lock");
- free(eq);
- return(NULL);
- }
- if(pthread_mutex_init(&eq->cond_lock, NULL)) {
- set_error(IL_SYS, errno, "event_queue_create: error creating cond mutex");
- free(eq);
- return(NULL);
- }
- if(pthread_cond_init(&eq->ready_cond, NULL)) {
- set_error(IL_SYS, errno, "event_queue_create: error creating cond variable");
- free(eq);
- return(NULL);
- }
-
-#if defined(INTERLOGD_HANDLE_CMD) && defined(INTERLOGD_FLUSH)
- if(pthread_cond_init(&eq->flush_cond, NULL)) {
- set_error(IL_SYS, errno, "event_queue_create: error creating cond variable");
- free(eq);
- return(NULL);
- }
-#endif
-
- return(eq);
-}
-
-
-int
-event_queue_free(struct event_queue *eq)
-{
- assert(eq != NULL);
-
- if(!event_queue_empty(eq))
- return(-1);
-
- if(eq->thread_id)
- pthread_cancel(eq->thread_id);
-
-
- pthread_rwlock_destroy(&eq->update_lock);
- pthread_mutex_destroy(&eq->cond_lock);
- pthread_cond_destroy(&eq->ready_cond);
-#if defined(INTERLOGD_HANDLE_CMD) && defined(INTERLOGD_FLUSH)
- pthread_cond_destroy(&eq->flush_cond);
-#endif
- free(eq);
-
- return(0);
-}
-
-
-int
-event_queue_empty(struct event_queue *eq)
-{
- int ret;
-
- assert(eq != NULL);
-
- event_queue_lock_ro(eq);
- ret = (eq->head == NULL);
- event_queue_unlock(eq);
-
- return(ret);
-}
-
-
-int
-event_queue_insert(struct event_queue *eq, struct server_msg *msg)
-{
- struct event_queue_msg *el;
-#if defined(INTERLOGD_EMS)
- struct event_queue_msg *tail;
-#endif
-
- assert(eq != NULL);
-
- if((el = malloc(sizeof(*el))) == NULL)
- return(set_error(IL_NOMEM, ENOMEM, "event_queue_insert: not enough room for queue element"));
-
- el->msg = server_msg_copy(msg);
- if(el->msg == NULL) {
- free(el);
- return(-1);
- };
-
- /* this is critical section */
- event_queue_lock(eq);
-#if defined(INTERLOGD_EMS)
- if(server_msg_is_priority(msg)) {
- /* priority messages go first */
- tail = eq->tail_ems;
- if(tail) {
- el->prev = tail->prev;
- tail->prev = el;
- if (tail == eq->tail)
- eq->tail = el;
- } else {
- el->prev = eq->head;
- eq->head = el;
- if(eq->tail == NULL)
- eq->tail = el;
- }
- eq->tail_ems = el;
- } else
-#endif
- {
- /* normal messages */
- if(eq->tail)
- eq->tail->prev = el;
- else
- eq->head = el;
- eq->tail = el;
- el->prev = NULL;
- }
-#if defined(INTERLOGD_EMS)
- /* if we are inserting message between mark_prev and mark_this,
- we have to adjust mark_prev accordingly */
- if(eq->mark_this && (el->prev == eq->mark_this))
- eq->mark_prev = el;
-#endif
-
- event_queue_unlock(eq);
- /* end of critical section */
-
- return(0);
-}
-
-
-int
-event_queue_get(struct event_queue *eq, struct server_msg **msg)
-{
- struct event_queue_msg *el;
-
- assert(eq != NULL);
- assert(msg != NULL);
-
- event_queue_lock(eq);
- el = eq->head;
-#if defined(INTERLOGD_EMS)
- /* this message is marked for removal, it is first on the queue */
- eq->mark_this = el;
- eq->mark_prev = NULL;
-#endif
- event_queue_unlock(eq);
-
- if(el == NULL)
- return(-1);
-
- *msg = el->msg;
-
- return(0);
-}
-
-
-int
-event_queue_remove(struct event_queue *eq)
-{
- struct event_queue_msg *el;
-#if defined(INTERLOGD_EMS)
- struct event_queue_msg *prev;
-#endif
-
- assert(eq != NULL);
-
- /* this is critical section */
- event_queue_lock(eq);
-#if defined(INTERLOGD_EMS)
- el = eq->mark_this;
- prev = eq->mark_prev;
-
- if(el == NULL) {
- event_queue_unlock(eq);
- return(-1);
- }
-
- if(prev == NULL) {
- /* removing from head of the queue */
- eq->head = el->prev;
- } else {
- /* removing from middle of the queue */
- prev->prev = el->prev;
- }
- if(el == eq->tail) {
- /* we are removing the last message */
- eq->tail = prev;
- }
- if(el == eq->tail_ems) {
- /* we are removing last priority message */
- eq->tail_ems = prev;
- }
-
- eq->mark_this = NULL;
- eq->mark_prev = NULL;
-#else
- el = eq->head;
- if(el == NULL) {
- event_queue_unlock(eq);
- return(-1);
- }
- eq->head = el->prev;
- if(el == eq->tail) {
- eq->tail = NULL;
- }
-#endif
- event_queue_unlock(eq);
- /* end of critical section */
-
- server_msg_free(el->msg);
- free(el);
-
- return(0);
-}
-
-#if defined(IL_NOTIFICATIONS)
-
-int
-event_queue_move_events(struct event_queue *eq_s, struct event_queue *eq_d, char *notif_id)
-{
- struct event_queue_msg *p, **source_prev, **dest_tail;
-
- assert(eq_s != NULL);
- assert(notif_id != NULL);
-
- event_queue_lock(eq_s);
- if(eq_d) {
- event_queue_lock(eq_d);
- /* dest tail is set to point to the last (NULL) pointer in the list */
- dest_tail = (eq_d->head == NULL) ? &(eq_d->head) : &(eq_d->tail->prev);
- }
- source_prev = &(eq_s->head);
- p = *source_prev;
- eq_s->tail = NULL;
- while(p) {
- if(strcmp(p->msg->job_id_s, notif_id) == 0) {
- il_log(LOG_DEBUG, " moving event at offset %d from %s:%d to %s:%d\n",
- p->msg->offset, eq_s->dest_name,eq_s->dest_port, eq_d ? eq_d->dest_name : "trash",eq_d ? eq_d->dest_port : -1);
- il_log(LOG_DEBUG, " current: %x, next: %x\n", p, p->prev);
- /* remove the message from the source list */
- *source_prev = p->prev;
- if(eq_d) {
- /* append the message at the end of destination list */
- p->prev = NULL;
- *dest_tail = p;
- dest_tail = &(p->prev);
- eq_d->tail = p;
- } else {
- /* free the message */
- server_msg_free(p->msg);
- free(p);
- }
- } else {
- /* message stays */
- source_prev = &(p->prev);
- eq_s->tail = p;
- }
- p = *source_prev;
- }
- if(eq_d) event_queue_unlock(eq_d);
- event_queue_unlock(eq_s);
- return(0);
-}
-
-#endif
+++ /dev/null
-#ident "$Header$"
-
-#include <assert.h>
-#include <stdio.h>
-#include <errno.h>
-#include <string.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <dirent.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#include <fcntl.h>
-
-#include "glite/lb/consumer.h"
-#include "glite/lb/events_parse.h"
-
-#include "interlogd.h"
-
-#ifdef __GNUC__
-#define UNUSED_VAR __attribute__((unused))
-#else
-#define UNUSED_VAR
-#endif
-
-static char *file_prefix = NULL;
-
-
-struct event_store_list {
- struct event_store *es;
- struct event_store_list *next;
-};
-
-
-static struct event_store_list *store_list;
-static pthread_rwlock_t store_list_lock = PTHREAD_RWLOCK_INITIALIZER;
-
-
-/* ----------------
- * helper functions
- * ----------------
- */
-static
-char *
-jobid2eventfile(IL_EVENT_ID_T job_id)
-{
- char *buffer;
- char *hash;
-
- if(job_id) {
- hash = IL_EVENT_GET_UNIQUE(job_id);
- asprintf(&buffer, "%s.%s", file_prefix, hash);
- free(hash);
- } else
- asprintf(&buffer, "%s.default", file_prefix);
-
- return(buffer);
-}
-
-
-static
-char *
-jobid2controlfile(IL_EVENT_ID_T job_id)
-{
- char buffer[256];
- char *hash;
-
- if(job_id) {
- hash = IL_EVENT_GET_UNIQUE(job_id);
- snprintf(buffer, 256, "%s.%s.ctl", file_prefix, hash);
- free(hash);
- } else
- snprintf(buffer, 256, "%s.default.ctl", file_prefix);
-
- return(strdup(buffer));
-}
-
-
-static
-char *
-read_event_string(FILE *file)
-{
- char *buffer, *p, *n;
- int len, c;
-
- buffer=malloc(1024);
- if(buffer == NULL) {
- set_error(IL_NOMEM, ENOMEM, "read_event_string: no room for event");
- return(NULL);
- }
- p = buffer;
- len = 1024;
-
- while((c=fgetc(file)) != EOF) {
-
- /* we have to have free room for one byte */
- /* if(len - (p - buffer) < 1) */
- if(p - buffer >= len) {
- n = realloc(buffer, len + 8192);
- if(n == NULL) {
- free(buffer);
- set_error(IL_NOMEM, ENOMEM, "read_event_string: no room for event");
- return(NULL);
- }
- p = p - buffer + n;
- buffer = n;
- len += 8192;
- }
-
- if(c == EVENT_SEPARATOR) {
- *p++ = 0;
- break;
- } else
- *p++ = (char) c;
- }
-
- if(c != EVENT_SEPARATOR) {
- free(buffer);
- return(NULL);
- }
-
- return(buffer);
-}
-
-
-
-/* ------------------------------
- * event_store 'member' functions
- * ------------------------------
- */
-static
-int
-event_store_free(struct event_store *es)
-{
- assert(es != NULL);
-
- if(es->job_id_s) free(es->job_id_s);
- if(es->event_file_name) free(es->event_file_name);
- if(es->control_file_name) free(es->control_file_name);
- pthread_rwlock_destroy(&es->use_lock);
- pthread_rwlock_destroy(&es->update_lock);
- free(es);
-
- return(0);
-}
-
-
-static
-struct event_store *
-event_store_create(char *job_id_s)
-{
- struct event_store *es;
- IL_EVENT_ID_T job_id;
-
- es = malloc(sizeof(*es));
- if(es == NULL) {
- set_error(IL_NOMEM, ENOMEM, "event_store_create: error allocating room for structure");
- return(NULL);
- }
-
- memset(es, 0, sizeof(*es));
-
- il_log(LOG_DEBUG, " creating event store for id %s\n", job_id_s);
-
- job_id = NULL;
- if(strcmp(job_id_s, "default") && IL_EVENT_ID_PARSE(job_id_s, &job_id)) {
- set_error(IL_LBAPI, EDG_WLL_ERROR_PARSE_BROKEN_ULM, "event_store_create: error parsing id");
- free(es);
- return(NULL);
- }
-
- es->job_id_s = strdup(job_id_s);
- es->event_file_name = jobid2eventfile(job_id);
- es->control_file_name = jobid2controlfile(job_id);
- IL_EVENT_ID_FREE(job_id);
-
- if(pthread_rwlock_init(&es->update_lock, NULL))
- abort();
- if(pthread_rwlock_init(&es->use_lock, NULL))
- abort();
-
- return(es);
-}
-
-
-static
-int
-event_store_lock_ro(struct event_store *es)
-{
- assert(es != NULL);
-
- if(pthread_rwlock_rdlock(&es->update_lock))
- abort();
-
- return(0);
-}
-
-
-static
-int
-event_store_lock(struct event_store *es)
-{
- assert(es != NULL);
-
- if(pthread_rwlock_wrlock(&es->update_lock))
- abort();
-
- return(0);
-}
-
-
-static
-int
-event_store_unlock(struct event_store *es)
-{
- assert(es != NULL);
-
- if(pthread_rwlock_unlock(&es->update_lock))
- abort();
- return(0);
-}
-
-
-static
-int
-event_store_read_ctl(struct event_store *es)
-{
- FILE *ctl_file;
-
- assert(es != NULL);
-
- event_store_lock(es);
- if((ctl_file = fopen(es->control_file_name, "r")) == NULL) {
- /* no control file, new event file */
- es->last_committed_ls = 0;
- es->last_committed_bs = 0;
- } else {
- /* read last seen and last committed counts */
- fscanf(ctl_file, "%*s\n%ld\n%ld\n",
- &es->last_committed_ls,
- &es->last_committed_bs);
- fclose(ctl_file);
- }
- event_store_unlock(es);
-
- return(0);
-}
-
-
-static
-int
-event_store_write_ctl(struct event_store *es)
-{
- FILE *ctl;
-
- assert(es != NULL);
-
- ctl = fopen(es->control_file_name, "w");
- if(ctl == NULL) {
- set_error(IL_SYS, errno, "event_store_write_ctl: error opening control file");
- return(-1);
- }
-
- if(fprintf(ctl, "%s\n%ld\n%ld\n",
- es->job_id_s,
- es->last_committed_ls,
- es->last_committed_bs) < 0) {
- set_error(IL_SYS, errno, "event_store_write_ctl: error writing control record");
- return(-1);
- }
-
- if(fclose(ctl) < 0) {
- set_error(IL_SYS, errno, "event_store_write_ctl: error closing control file");
- return(-1);
- }
-
- return(0);
-}
-
-
-/*
- * event_store_recover()
- * - recover after restart or catch up when events missing in IPC
- * - if offset > 0, read everything behind it
- * - if offset == 0, read everything behind min(last_committed_bs, last_committed_es)
- */
-int
-event_store_recover(struct event_store *es)
-{
- struct event_queue *eq_l = NULL, *eq_b, *eq_b_new;
- struct server_msg *msg;
- char *event_s;
- int fd, ret;
- long last;
- FILE *ef;
- struct flock efl;
- char err_msg[128];
-
- assert(es != NULL);
-
-#if defined(IL_NOTIFICATIONS)
- eq_b = queue_list_get(es->dest);
-#else
- /* find bookkepping server queue */
- eq_b = queue_list_get(es->job_id_s);
-#endif
- if(eq_b == NULL)
- return(-1);
-
-#if !defined(IL_NOTIFICATIONS)
- /* get log server queue */
- eq_l = queue_list_get(NULL);
-#endif
-
- event_store_lock(es);
-
- il_log(LOG_DEBUG, " reading events from %s\n", es->event_file_name);
-
- /* open event file */
- ef = fopen(es->event_file_name, "r");
- if(ef == NULL) {
- snprintf(err_msg, sizeof(err_msg),
- "event_store_recover: error opening event file %s",
- es->event_file_name);
- set_error(IL_SYS, errno, err_msg);
- event_store_unlock(es);
- return(-1);
- }
-
- /* lock the file for reading (we should not read while dglogd is writing) */
- fd = fileno(ef);
- efl.l_type = F_RDLCK;
- efl.l_whence = SEEK_SET;
- efl.l_start = 0;
- efl.l_len = 0;
- if(fcntl(fd, F_SETLKW, &efl) < 0) {
- snprintf(err_msg, sizeof(err_msg),
- "event_store_recover: error locking event file %s",
- es->event_file_name);
- set_error(IL_SYS, errno, err_msg);
- event_store_unlock(es);
- fclose(ef);
- return(-1);
- }
-
- while(1) { /* try, try, try */
-
- /* get the position in file to be sought */
- if(es->offset)
- last = es->offset;
- else {
-#if !defined(IL_NOTIFICATIONS)
- if(eq_b == eq_l)
- last = es->last_committed_ls;
- else
-#endif
- /* last = min(ls, bs) */
- /* I took the liberty to optimize this,
- since LS is not used. */
- /* last = (es->last_committed_bs <
- es->last_committed_ls) ? es->last_committed_bs :
- es->last_committed_ls; */
- last = es->last_committed_bs;
- }
-
- il_log(LOG_DEBUG, " setting starting file position to %ld\n", last);
- il_log(LOG_DEBUG, " bytes sent to logging server: %d\n", es->last_committed_ls);
- il_log(LOG_DEBUG, " bytes sent to bookkeeping server: %d\n", es->last_committed_bs);
-
- if(last > 0) {
- int c;
-
- /* skip all committed or already enqueued events */
- /* be careful - check, if the offset really points to the
- beginning of event string */
- if(fseek(ef, last-1, SEEK_SET) < 0) {
- set_error(IL_SYS, errno, "event_store_recover: error setting position for read");
- event_store_unlock(es);
- fclose(ef);
- return(-1);
- }
- /* the last enqueued event MUST end with EVENT_SEPARATOR,
- even if the offset points at EOF */
- if((c=fgetc(ef)) != EVENT_SEPARATOR) {
- /* Houston, we have got a problem */
- il_log(LOG_WARNING,
- " file position %ld does not point at the beginning of event string, backing off!\n",
- last);
- /* now, where were we? */
- if(es->offset) {
- /* next try will be with
- last_commited_bs */
- es->offset = 0;
- } else {
- /* this is really weird... back off completely */
- es->last_committed_ls = es->last_committed_bs = 0;
- }
- } else {
- /* OK, break out of the loop */
- break;
- }
- } else {
- /* this breaks out of the loop, we are starting at
- * the beginning of file
- */
- if(fseek(ef, 0, SEEK_SET) < 0) {
- set_error(IL_SYS, errno, "event_store_recover: error setting position for read");
- event_store_unlock(es);
- fclose(ef);
- return(-1);
- }
- break;
- }
- }
-
- /* enqueue all remaining events */
- ret = 1;
- msg = NULL;
- while((event_s=read_event_string(ef)) != NULL) {
-
- /* last holds the starting position of event_s in file */
- il_log(LOG_DEBUG, " reading event at %ld\n", last);
-
- /* break from now on means there was some error */
- ret = -1;
-
- /* create message for server */
- {
- il_octet_string_t e;
-
- e.data = event_s;
- e.len = strlen(event_s);
- msg = server_msg_create(&e, last);
- free(event_s);
- }
- if(msg == NULL) {
- il_log(LOG_ALERT, " event file corrupted! Please move it to quarantine (ie. somewhere else) and restart interlogger.\n");
- break;
- }
- msg->es = es;
-
- /* first enqueue to the LS */
- if(!bs_only && (last >= es->last_committed_ls)) {
-
- il_log(LOG_DEBUG, " queueing event at %ld to logging server\n", last);
-
-#if !defined(IL_NOTIFICATIONS)
- if(enqueue_msg(eq_l, msg) < 0)
- break;
-#endif
- }
-
-#ifdef IL_NOTIFICATIONS
- eq_b_new = queue_list_get(msg->dest);
- if (eq_b_new != eq_b) {
- free(es->dest);
- es->dest = strdup(msg->dest);
- eq_b = eq_b_new;
- }
-#endif
-
- /* now enqueue to the BS, if neccessary */
- if((eq_b != eq_l) &&
- (last >= es->last_committed_bs)) {
-
- il_log(LOG_DEBUG, " queueing event at %ld to bookkeeping server\n", last);
-
- if(enqueue_msg(eq_b, msg) < 0)
- break;
- }
- server_msg_free(msg);
- msg = NULL;
-
- /* now last is also the offset behind the last successfully queued event */
- last = ftell(ef);
-
- /* ret == 0 means EOF or incomplete event found */
- ret = 0;
-
- } /* while */
-
- /* due to this little assignment we had to lock the event_store for writing */
- es->offset = last;
- il_log(LOG_DEBUG, " event store offset set to %ld\n", last);
-
- if(msg)
- server_msg_free(msg);
-
- fclose(ef);
- il_log(LOG_DEBUG, " finished reading events with %d\n", ret);
-
- event_store_unlock(es);
- return(ret);
-}
-
-
-/*
- * event_store_sync()
- * - check the position of event and fill holes from file
- * - return 1 if the event is new,
- * 0 if it was seen before,
- * -1 if there was an error
- */
-int
-event_store_sync(struct event_store *es, long offset)
-{
- int ret;
-
- assert(es != NULL);
-
- event_store_lock_ro(es);
- if(es->offset == offset)
- /* we are up to date */
- ret = 1;
- else if(es->offset > offset)
- /* we have already seen this event */
- ret = 0;
- else {
- /* es->offset < offset, i.e. we have missed some events */
- event_store_unlock(es);
- ret = event_store_recover(es);
- /* XXX possible room for intervention by another thread - is there
- * any other thread messing with us?
- * 1) After recover() es->offset is set at the end of file.
- * 2) es->offset is set only by recover() and next().
- * 3) Additional recover can not do much harm.
- * 4) And next() is only called by the same thread as sync().
- * 5) use_lock is in place, so no cleanup possible
- * => no one is messing with us right now */
- event_store_lock_ro(es);
- if(ret < 0)
- ret = -1;
- else
- if(es->offset <= offset) {
- /* Apparently there is something wrong - we are receiving an event
- * which is beyond the end of file. Someone must have removed the file
- * when we were not looking. The question is - what should we do with the event?
- * We have to send it, as this is the only one occasion when we see it.
- * However, we must not allow the es->offset to be set using this event,
- * as it would point after the end of file. Sort this out in event_store_next().
- */
- ret = 1;
- } else if(es->offset > offset) {
- /* we have seen at least this event */
- ret = 0;
- }
- }
- event_store_unlock(es);
- return(ret);
-}
-
-
-int
-event_store_next(struct event_store *es, long offset, int len)
-{
- assert(es != NULL);
-
- event_store_lock(es);
- /* Whoa, be careful now. The es->offset points right after the last enqueued event,
- * but it may not be the offset of the event WE have just enqueued, because:!
- * 1) someone could have removed the event file behind our back
- * 2) the file could have been recover()ed and more events read
- * In either case the offset should not be moved.
- */
- if(es->offset == offset) {
- es->offset += len;
- }
- event_store_unlock(es);
-
- return(0);
-}
-
-
-/*
- * event_store_commit()
- *
- */
-int
-event_store_commit(struct event_store *es, int len, int ls)
-{
- assert(es != NULL);
-
- event_store_lock(es);
-
- if(ls)
- es->last_committed_ls += len;
- else {
- es->last_committed_bs += len;
- if (bs_only) es->last_committed_ls += len;
- }
-
- if(event_store_write_ctl(es) < 0) {
- event_store_unlock(es);
- return(-1);
- }
-
- event_store_unlock(es);
-
-
- return(0);
-}
-
-
-/*
- * event_store_clean()
- * - remove the event files (event and ctl), if they are not needed anymore
- * - returns 0 if event_store is in use, 1 if it was removed and -1 on error
- *
- * Q: How do we know that we can safely remove the files?
- * A: When all events from file have been committed both by LS and BS.
- */
-static
-int
-event_store_clean(struct event_store *es)
-{
- long last;
- int fd;
- FILE *ef;
- struct flock efl;
-
- assert(es != NULL);
-
- /* prevent sender threads from updating */
- event_store_lock(es);
-
- il_log(LOG_DEBUG, " trying to cleanup event store %s\n", es->job_id_s);
- il_log(LOG_DEBUG, " bytes sent to logging server: %d\n", es->last_committed_ls);
- il_log(LOG_DEBUG, " bytes sent to bookkeeping server: %d\n", es->last_committed_bs);
-
- /* preliminary check to avoid opening event file */
- /* if the positions differ, some events still have to be sent */
- if(es->last_committed_ls != es->last_committed_bs) {
- event_store_unlock(es);
- il_log(LOG_DEBUG, " not all events sent, cleanup aborted\n");
- return(0);
- }
-
- /* the file can only be removed when all the events were succesfully sent
- (ie. committed both by LS and BS */
- /* That also implies that the event queues are 'empty' at the moment. */
- ef = fopen(es->event_file_name, "r+");
- if(ef == NULL) {
- /* if we can not open the event store, it is an error and the struct should be removed */
- /* XXX - is it true? */
- event_store_unlock(es);
- il_log(LOG_ERR, " event_store_clean: error opening event file: %s\n", strerror(errno));
- return(1);
- }
-
- fd = fileno(ef);
-
- /* prevent local-logger from writing into event file */
- efl.l_type = F_WRLCK;
- efl.l_whence = SEEK_SET;
- efl.l_start = 0;
- efl.l_len = 0;
- if(fcntl(fd, F_SETLK, &efl) < 0) {
- il_log(LOG_DEBUG, " could not lock event file, cleanup aborted\n");
- fclose(ef);
- event_store_unlock(es);
- if(errno != EACCES &&
- errno != EAGAIN) {
- set_error(IL_SYS, errno, "event_store_clean: error locking event file");
- return(-1);
- }
- return(0);
- }
-
- /* now the file should not contain partially written event, so it is safe
- to get offset behind last event by seeking the end of file */
- if(fseek(ef, 0, SEEK_END) < 0) {
- set_error(IL_SYS, errno, "event_store_clean: error seeking the end of file");
- event_store_unlock(es);
- fclose(ef);
- return(-1);
- }
-
- last = ftell(ef);
- il_log(LOG_DEBUG, " total bytes in file: %d\n", last);
-
- if(es->last_committed_ls < last) {
- fclose(ef);
- event_store_unlock(es);
- il_log(LOG_DEBUG, " events still waiting in queue, cleanup aborted\n");
- return(0);
- } else if( es->last_committed_ls > last) {
- il_log(LOG_WARNING, " warning: event file seems to shrink!\n");
- }
-
- /* now we are sure that all events were sent and the event queues are empty */
- il_log(LOG_INFO, " removing event file %s\n", es->event_file_name);
-
- /* remove the event file */
- unlink(es->event_file_name);
- unlink(es->control_file_name);
-
- /* clear the counters */
- es->last_committed_ls = 0;
- es->last_committed_bs = 0;
- es->offset = 0;
-
- /* unlock the event_store even if it is going to be removed */
- event_store_unlock(es);
-
- /* close the event file (that unlocks it as well) */
- fclose(ef);
-
- /* indicate that it is safe to remove this event_store */
- return(1);
-}
-
-
-
-/* --------------------------------
- * event store management functions
- * --------------------------------
- */
-struct event_store *
-event_store_find(char *job_id_s)
-{
- struct event_store_list *q, *p;
- struct event_store *es;
-
- if(pthread_rwlock_wrlock(&store_list_lock)) {
- abort();
- }
-
- es = NULL;
-
- q = NULL;
- p = store_list;
-
- while(p) {
- if(strcmp(p->es->job_id_s, job_id_s) == 0) {
- es = p->es;
- if(pthread_rwlock_rdlock(&es->use_lock))
- abort();
- if(pthread_rwlock_unlock(&store_list_lock))
- abort();
- return(es);
- }
-
- q = p;
- p = p->next;
- }
-
- es = event_store_create(job_id_s);
- if(es == NULL) {
- if(pthread_rwlock_unlock(&store_list_lock))
- abort();
- return(NULL);
- }
-
- p = malloc(sizeof(*p));
- if(p == NULL) {
- set_error(IL_NOMEM, ENOMEM, "event_store_find: no room for new event store");
- if(pthread_rwlock_unlock(&store_list_lock))
- abort();
- return(NULL);
- }
-
- p->next = store_list;
- store_list = p;
-
- p->es = es;
-
- if(pthread_rwlock_rdlock(&es->use_lock))
- abort();
-
- if(pthread_rwlock_unlock(&store_list_lock))
- abort();
-
- return(es);
-}
-
-
-int
-event_store_release(struct event_store *es)
-{
- assert(es != NULL);
-
- if(pthread_rwlock_unlock(&es->use_lock))
- abort();
- il_log(LOG_DEBUG, " released lock on %s\n", es->job_id_s);
- return(0);
-}
-
-
-static
-int
-event_store_from_file(char *filename)
-{
- struct event_store *es;
- FILE *event_file;
- char *event_s, *job_id_s = NULL;
- int ret;
-#if defined(IL_NOTIFICATIONS)
- edg_wll_Event *notif_event;
- edg_wll_Context context;
- char *dest_name = NULL;
-
- edg_wll_InitContext(&context);
-#endif
-
- il_log(LOG_INFO, " attaching to event file: %s\n", filename);
-
- event_file = fopen(filename, "r");
- if(event_file == NULL) {
- set_error(IL_SYS, errno, "event_store_from_file: error opening event file");
- return(-1);
- }
- event_s = read_event_string(event_file);
- fclose(event_file);
- if(event_s == NULL)
- return(0);
-
-#if defined(IL_NOTIFICATIONS)
-	if((ret=edg_wll_ParseNotifEvent(context, event_s, &notif_event))) {
- set_error(IL_LBAPI, ret, "event_store_from_file: could not parse event");
- ret = -1;
- goto out;
- }
- if(notif_event->notification.notifId == NULL) {
- set_error(IL_LBAPI, EDG_WLL_ERROR_PARSE_BROKEN_ULM,
- "event_store_from_file: parse error - no notif id");
- ret = -1;
- goto out;
- }
- if((job_id_s = edg_wll_NotifIdUnparse(notif_event->notification.notifId)) == NULL) {
- set_error(IL_SYS, ENOMEM, "event_store_from_file: could not copy id");
- ret = -1;
- goto out;
- }
- if(notif_event->notification.dest_host &&
- (strlen(notif_event->notification.dest_host) > 0)) {
- asprintf(&dest_name, "%s:%d", notif_event->notification.dest_host, notif_event->notification.dest_port);
- }
-
-#else
- job_id_s = edg_wll_GetJobId(event_s);
-#endif
- il_log(LOG_DEBUG, " event id: '%s'\n", job_id_s);
- if(job_id_s == NULL) {
- il_log(LOG_NOTICE, " skipping file, could not parse event\n");
- ret = 0;
- goto out;
- }
-
- es=event_store_find(job_id_s);
-
- if(es == NULL) {
- ret = -1;
- goto out;
- }
-
-#if defined(IL_NOTIFICATIONS)
- es->dest = dest_name;
-#endif
-
- if((es->last_committed_ls == 0) &&
- (es->last_committed_bs == 0) &&
- (es->offset == 0)) {
- ret = event_store_read_ctl(es);
- } else
- ret = 0;
-
- event_store_release(es);
-
-out:
-#if defined(IL_NOTIFICATIONS)
- if(notif_event) {
- edg_wll_FreeEvent(notif_event);
- free(notif_event);
- }
-#endif
- if(event_s) free(event_s);
- if(job_id_s) free(job_id_s);
- return(ret);
-}
-
-
-int
-event_store_init(char *prefix)
-{
- if(file_prefix == NULL) {
- file_prefix = strdup(prefix);
- store_list = NULL;
- }
-
- /* read directory and get a list of event files */
- {
- int len;
-
- char *p, *dir;
- DIR *event_dir;
- struct dirent *entry;
-
-
- /* get directory name */
- p = strrchr(file_prefix, '/');
- if(p == NULL) {
- dir = strdup(".");
- p = "";
- len = 0;
- } else {
- *p = '\0';
- dir = strdup(file_prefix);
- *p++ = '/';
- len = strlen(p);
- }
-
- event_dir = opendir(dir);
- if(event_dir == NULL) {
- free(dir);
- set_error(IL_SYS, errno, "event_store_init: error opening event directory");
- return(-1);
- }
-
- while((entry=readdir(event_dir))) {
- char *s;
-
- /* skip all files that do not match prefix */
- if(strncmp(entry->d_name, p, len) != 0)
- continue;
-
- /* skip all control files */
- if((s=strstr(entry->d_name, ".ctl")) != NULL &&
- s[4] == '\0')
- continue;
-
- s = malloc(strlen(dir) + strlen(entry->d_name) + 2);
- if(s == NULL) {
- free(dir);
- set_error(IL_NOMEM, ENOMEM, "event_store_init: no room for file name");
- return(-1);
- }
-
- *s = '\0';
- strcat(s, dir);
- strcat(s, "/");
- strcat(s, entry->d_name);
-
- if(event_store_from_file(s) < 0) {
- free(dir);
- free(s);
- closedir(event_dir);
- return(-1);
- }
-
- free(s);
- }
- closedir(event_dir);
-
- /* one more pass - this time remove stale .ctl files */
- event_dir = opendir(dir);
- if(event_dir == NULL) {
- free(dir);
- set_error(IL_SYS, errno, "event_store_init: error opening event directory");
- return(-1);
- }
-
- while((entry=readdir(event_dir))) {
- char *s;
-
- /* skip all files that do not match prefix */
- if(strncmp(entry->d_name, p, len) != 0)
- continue;
-
- /* find all control files */
- if((s=strstr(entry->d_name, ".ctl")) != NULL &&
- s[4] == '\0') {
- char *ef;
- struct stat st;
-
- /* is there corresponding event file? */
- ef = malloc(strlen(dir) + strlen(entry->d_name) + 2);
- if(ef == NULL) {
- free(dir);
- set_error(IL_NOMEM, ENOMEM, "event_store_init: no room for event file name");
- return(-1);
- }
-
- s[0] = 0;
- *ef = '\0';
- strcat(ef, dir);
- strcat(ef, "/");
- strcat(ef, entry->d_name);
- s[0] = '.';
-
- if(stat(ef, &st) == 0) {
- /* something is there */
- /* XXX - it could be something other than the event file, but do not bother now */
- } else {
- /* could not stat file, remove ctl */
- strcat(ef, s);
- il_log(LOG_DEBUG, " removing stale file %s\n", ef);
- if(unlink(ef))
- il_log(LOG_ERR, " could not remove file %s: %s\n", ef, strerror(errno));
-
- }
- free(ef);
-
- }
- }
- closedir(event_dir);
- free(dir);
- }
-
- return(0);
-}
-
-
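-/* event_store_recover_all - run recovery on every event store in the list,
- * typically after events may have been missed on the input socket. */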
-int
-event_store_recover_all()
-{
- struct event_store_list *sl;
-
-
- if(pthread_rwlock_rdlock(&store_list_lock))
- abort();
-
- /* recover all event stores */
- sl = store_list;
- while(sl != NULL) {
-
- /* recover this event store */
- /* no need to lock use_lock in event_store, the store_list_lock is in place */
- if(event_store_recover(sl->es) < 0) {
- il_log(LOG_ERR, " error recovering event store %s:\n %s\n", sl->es->event_file_name, error_get_msg());
- clear_error();
- }
- sl = sl->next;
- }
-
- if(pthread_rwlock_unlock(&store_list_lock))
- abort();
-
- return(0);
-}
-
-
-#if 0
-int
-event_store_remove(struct event_store *es)
-{
- struct event_store_list *p, **q;
-
- assert(es != NULL);
-
- switch(event_store_clean(es)) {
- case 0:
- il_log(LOG_DEBUG, " event store not removed, still used\n");
- return(0);
-
- case 1:
- if(pthread_rwlock_wrlock(&store_list_lock) < 0) {
- set_error(IL_SYS, errno, " event_store_remove: error locking event store list");
- return(-1);
- }
-
- p = store_list;
- q = &store_list;
-
- while(p) {
- if(p->es == es) {
- (*q) = p->next;
- event_store_free(es);
- free(p);
- break;
- }
- q = &(p->next);
- p = p->next;
- }
-
- if(pthread_rwlock_unlock(&store_list_lock) < 0) {
- set_error(IL_SYS, errno, " event_store_remove: error unlocking event store list");
- return(-1);
- }
- return(1);
-
- default:
- return(-1);
- }
- /* not reached */
- return(0);
-}
-#endif
-
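-/* event_store_cleanup - walk the store list and free every event store that
- * event_store_clean() reports as clean; stores locked by other threads are
- * skipped. */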
-int
-event_store_cleanup()
-{
- struct event_store_list *sl;
- struct event_store_list *slnext;
- struct event_store_list **prev;
-
- /* try to remove event files */
-
- if(pthread_rwlock_wrlock(&store_list_lock))
- abort();
-
- sl = store_list;
- prev = &store_list;
-
- while(sl != NULL) {
- int ret;
-
- slnext = sl->next;
-
- /* one event store at time */
- ret = pthread_rwlock_trywrlock(&sl->es->use_lock);
- if(ret == EBUSY) {
- il_log(LOG_DEBUG, " event_store %s is in use by another thread\n",
- sl->es->job_id_s);
- sl = slnext;
- continue;
- } else if (ret != 0) /* pthread_rwlock_trywrlock() returns an error number, never -1 */
- abort();
-
- switch(event_store_clean(sl->es)) {
-
- case 1:
- /* remove this event store */
- (*prev) = slnext;
- event_store_free(sl->es);
- free(sl);
- break;
-
- case -1:
- il_log(LOG_ERR, " error removing event store %s (file %s):\n %s\n",
- sl->es->job_id_s, sl->es->event_file_name, error_get_msg());
- clear_error();
- /* fall through to the default case, which releases the store and
- goes on to the next one (releasing here as well would unlock
- use_lock twice) */
-
- default:
- event_store_release(sl->es);
- prev = &(sl->next);
- break;
- }
-
- sl = slnext;
- }
-
- if(pthread_rwlock_unlock(&store_list_lock))
- abort();
-
- return(0);
-}
-
+++ /dev/null
-#ident "$Header$"
-
-#include <errno.h>
-#include <stdio.h>
-#include <string.h>
-#include <netdb.h>
-#include <assert.h>
-#include <pthread.h>
-#include <stdlib.h>
-
-/* XXX DK: */
-#include <openssl/err.h> // OpenSSL error strings (ERR_error_string)
-
-#include "glite/security/glite_gss.h"
-
-#include "il_error.h"
-
-
-extern int log_level;
-
-static pthread_key_t err_key;
-
-static int IL_ERR_MSG_LEN = 1024;
-
-static
-void
-error_key_delete(void *err)
-{
- free(err);
-}
-
-static
-void
-error_key_create()
-{
- pthread_key_create(&err_key, error_key_delete);
-}
-
-static
-struct error_inf *
-error_get_err ()
-{
- struct error_inf *err;
-
- /* get thread specific error structure */
- err = (struct error_inf *)pthread_getspecific(err_key);
- assert(err != NULL);
-
- return(err);
-}
-
-int
-init_errors(int level)
-{
- static pthread_once_t error_once = PTHREAD_ONCE_INIT;
- struct error_inf *err;
-
- /* create the key for thread specific error only once */
- pthread_once(&error_once, error_key_create);
-
- /* there is no thread error yet, try to create one */
- if((err = (struct error_inf *)malloc(sizeof(*err)))) {
- /* allocation successful, make it thread specific data */
- if(pthread_setspecific(err_key, err)) {
- free(err);
- return(-1);
- }
- } else
- return(-1);
-
- err->code_maj = 0;
- err->code_min = 0;
- err->msg = malloc(IL_ERR_MSG_LEN + 1);
- if(err->msg == NULL)
- return(-1);
-
- if(level)
- log_level = level;
-
- return(0);
-}
-
-int
-set_error(int code, long minor, char *msg)
-{
- struct error_inf *err;
-
- err = error_get_err();
-
- err->code_maj = code;
- err->code_min = minor;
-
- switch(code) {
-
- case IL_SYS:
- snprintf(err->msg, IL_ERR_MSG_LEN, "%s: %s", msg, strerror(err->code_min));
- break;
-
- case IL_HOST:
- snprintf(err->msg, IL_ERR_MSG_LEN, "%s: %s", msg, hstrerror(err->code_min));
- break;
-
- /* XXX DK: is this value actually needed for anything? */
- case IL_AUTH:
- snprintf(err->msg, IL_ERR_MSG_LEN, "%s: %s", msg, ERR_error_string(err->code_min, NULL));
- break;
-
- case IL_DGGSS:
- switch(err->code_min) {
-
- case EDG_WLL_GSS_ERROR_GSS:
- snprintf(err->msg, IL_ERR_MSG_LEN, "%s", msg);
- break;
-
- case EDG_WLL_GSS_ERROR_TIMEOUT:
- snprintf(err->msg, IL_ERR_MSG_LEN, "%s: Timeout in GSS connection.", msg);
- break;
-
- case EDG_WLL_GSS_ERROR_EOF:
- snprintf(err->msg, IL_ERR_MSG_LEN, "%s: Connection lost.", msg);
- break;
-
- case EDG_WLL_GSS_ERROR_ERRNO:
- snprintf(err->msg, IL_ERR_MSG_LEN, "%s: %s", msg, strerror(errno));
- break;
-
- case EDG_WLL_GSS_ERROR_HERRNO:
- snprintf(err->msg, IL_ERR_MSG_LEN, "%s: %s", msg, hstrerror(errno));
- break;
- }
- break; /* do not fall through to the default message */
-
- default:
- strncpy(err->msg, msg, IL_ERR_MSG_LEN);
- }
-
- return(code);
-}
-
-
-int
-clear_error() {
- struct error_inf *err;
-
- err = error_get_err();
-
- err->code_maj = IL_OK;
- err->code_min = 0;
- *(err->msg) = 0;
-
- return(0);
-}
-
-
-int
-error_get_maj()
-{
- struct error_inf *err;
-
- err = error_get_err();
-
- return(err->code_maj);
-}
-
-
-long
-error_get_min()
-{
- struct error_inf *err;
-
- err = error_get_err();
-
- return(err->code_min);
-}
-
-
-char *
-error_get_msg()
-{
- struct error_inf *err;
-
- err = error_get_err();
-
- return(err->msg);
-}
+++ /dev/null
-#ifndef IL_ERROR_H
-#define IL_ERROR_H
-
-#ident "$Header$"
-
-#include <syslog.h>
-
-enum err_code_maj { /* minor = */
- IL_OK, /* 0 */
- IL_SYS, /* errno */
- IL_NOMEM, /* ENOMEM */
- IL_AUTH, /* 0 (SSL error) */
- IL_PROTO, /* LB_* */
- IL_LBAPI, /* dgLBErrCode */
- IL_DGGSS, /* EDG_WLL_GSS_* */
- IL_HOST /* h_errno */
-};
-
-struct error_inf {
- int code_maj;
- long code_min;
- char *msg;
-};
-
-int init_errors(int);
-int set_error(int, long, char *);
-int clear_error();
-int error_get_maj();
-long error_get_min();
-char *error_get_msg();
-
-int il_log(int, char *, ...);
-
-#endif
+++ /dev/null
-#ident "$Header$"
-
-#include <errno.h>
-#include <string.h>
-#include <unistd.h>
-
-#include "glite/wmsutils/jobid/cjobid.h"
-#include "glite/lb/context.h"
-#include "glite/lb/events_parse.h"
-#include "glite/lb/il_string.h"
-
-#include "interlogd.h"
-
-#ifdef LB_PERF
-#include "glite/lb/lb_perftest.h"
-#endif
-
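-/* enqueue_msg - insert a message into the given output queue and make sure
- * a delivery thread is running for it; with IL_NOTIFICATIONS it also moves
- * queued events when the client changed the delivery address. */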
-int
-enqueue_msg(struct event_queue *eq, struct server_msg *msg)
-{
-#if defined(IL_NOTIFICATIONS)
- struct event_queue *eq_known;
-
- /* now we have a new event with possibly changed destination,
- so check for the already known destination and possibly move
- events from the original output queue to a new one */
- eq_known = notifid_map_get_dest(msg->job_id_s);
- if(eq != eq_known) {
- /* client has changed delivery address for this notification */
- if(notifid_map_set_dest(msg->job_id_s, eq) < 0)
- return(-1);
- /* move all events with this notif_id from eq_known to eq */
- if(eq_known != NULL)
- event_queue_move_events(eq_known, eq, msg->job_id_s);
- }
-#endif
-
- /* fire thread to take care of this queue */
- if(event_queue_create_thread(eq) < 0)
- return(-1);
-
-#if defined(IL_NOTIFICATIONS)
- /* if there are no data to send, do not send anything
- (message was just to change the delivery address) */
- if(msg->len == 0)
- return(0);
-#endif
- /* avoid losing signal to thread */
- event_queue_cond_lock(eq);
-
- /* insert new event */
- if(event_queue_insert(eq, msg) < 0) {
- event_queue_cond_unlock(eq);
- return(-1);
- }
-
- /* signal thread that we have a new message */
- event_queue_signal(eq);
-
- /* allow thread to continue */
- event_queue_cond_unlock(eq);
-
- return(0);
-}
-
-
-#if defined(INTERLOGD_HANDLE_CMD) && defined(INTERLOGD_FLUSH)
-pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;
-pthread_cond_t flush_cond = PTHREAD_COND_INITIALIZER;
-#endif /* INTERLOGD_FLUSH */
-
-#ifdef INTERLOGD_HANDLE_CMD
-static
-int
-parse_cmd(char *event, char **job_id_s, long *receipt, int *timeout)
-{
- char *token, *r;
- int ret;
-
- if(strstr(event, "DG.TYPE=\"command\"") == NULL)
- return(-1);
-
- *job_id_s = NULL;
- *timeout = 0;
- *receipt = 0;
- ret = 0;
-
- for(token = strtok(event, " "); token != NULL; token = strtok(NULL, " ")) {
- r = index(token, '=');
- if(r == NULL) {
- ret = -1;
- continue;
- }
- if(strncmp(token, "DG.COMMAND", r - token) == 0) {
-#if defined(INTERLOGD_FLUSH)
- if(strcmp(++r, "\"flush\"")) {
-#endif
- il_log(LOG_WARNING, " command %s not implemented\n", r);
- ret = -1;
- continue;
-#if defined(INTERLOGD_FLUSH)
- }
-#endif
- } else if(strncmp(token, "DG.JOBID", r - token) == 0) {
- char *p;
-
- r += 2; /* skip =" */
- p = index(r, '"');
- if(p == NULL) { ret = -1; continue; }
- *job_id_s = strndup(r, p-r);
-
- } else if(strncmp(token, "DG.TIMEOUT", r - token) == 0) {
- sscanf(++r, "\"%d\"", timeout);
- } else if(strncmp(token, "DG.LLLID", r - token) == 0) {
- sscanf(++r, "%ld", receipt);
- }
-
- }
- return(0);
-}
-
-
-/* return value:
- * 0 - not command
- * 1 - success
- * -1 - failure
- */
-
-static
-int
-handle_cmd(il_octet_string_t *event, long offset)
-{
- char *job_id_s;
- struct event_queue *eq;
- int num_replies, num_threads = 0;
- int timeout, result;
- long receipt;
- struct timespec endtime;
- struct timeval tv;
-
- /* parse command */
- if(parse_cmd(event->data, &job_id_s, &receipt, &timeout) < 0)
- return(0);
-
-#if defined(INTERLOGD_FLUSH)
- il_log(LOG_DEBUG, " received FLUSH command\n");
-
- /* catch up with all necessary event files */
- if(job_id_s) {
- struct event_store *es = event_store_find(job_id_s);
-
- if(es == NULL) {
- goto cmd_error;
- }
- result = event_store_recover(es);
- /* NOTE: if flush had been stored in file, there would have been
- no need to lock the event_store at all */
- event_store_release(es);
- if(result < 0) {
- il_log(LOG_ERR, " error trying to catch up with event file: %s\n",
- error_get_msg());
- clear_error();
- }
- } else
- /* this call does not fail :-) */
- event_store_recover_all();
-
- il_log(LOG_DEBUG, " alerting threads to report status\n");
-
- /* prevent threads from reporting too early */
- if(pthread_mutex_lock(&flush_lock) < 0) {
- /*** this error is considered too serious to let the program keep running!
- set_error(IL_SYS, errno, "pthread_mutex_lock: error locking flush lock");
- goto cmd_error;
- */
- abort();
- }
-
- /* wake up all threads */
- if(job_id_s) {
- /* find appropriate queue */
- eq = queue_list_get(job_id_s);
- if(eq == NULL) goto cmd_error;
- if(!event_queue_empty(eq) && !queue_list_is_log(eq)) {
- num_threads++;
- event_queue_cond_lock(eq);
- eq->flushing = 1;
- event_queue_wakeup(eq);
- event_queue_cond_unlock(eq);
- }
- } else {
- /* iterate over event queues */
- for(eq=queue_list_first(); eq != NULL; eq=queue_list_next()) {
- if(!event_queue_empty(eq) && !queue_list_is_log(eq)) {
- num_threads++;
- event_queue_cond_lock(eq);
- eq->flushing = 1;
- event_queue_wakeup(eq);
- event_queue_cond_unlock(eq);
- }
- }
- }
- if(!bs_only) {
- eq = queue_list_get(NULL);
- if(eq == NULL) goto cmd_error;
- if(!event_queue_empty(eq)) {
- num_threads++;
- event_queue_cond_lock(eq);
- eq->flushing = 1;
- event_queue_wakeup(eq);
- event_queue_cond_unlock(eq);
- }
- }
-
- /* wait for thread replies */
- num_replies = 0;
- result = 1;
- gettimeofday(&tv, NULL);
- endtime.tv_sec = tv.tv_sec + timeout;
- endtime.tv_nsec = 1000 * tv.tv_usec;
- while(num_replies < num_threads) {
- int ret;
- /* pthread_cond_timedwait() returns an error number (e.g. ETIMEDOUT), never -1 */
- if((ret=pthread_cond_timedwait(&flush_cond, &flush_lock, &endtime)) != 0) {
- il_log(LOG_ERR, " error waiting for thread reply: %s\n", strerror(ret));
- result = (ret == ETIMEDOUT) ? 0 : -1;
- break;
- }
-
- /* collect results from reporting threads */
- if(job_id_s) {
- /* find appropriate queue */
- eq = queue_list_get(job_id_s);
- if(eq == NULL) goto cmd_error;
- if(!queue_list_is_log(eq)) {
- event_queue_cond_lock(eq);
- if(eq->flushing == 2) {
- eq->flushing = 0;
- num_replies++;
- result = ((result == 1) || (eq->flush_result < 0)) ?
- eq->flush_result : result;
- }
- event_queue_cond_unlock(eq);
- }
- } else {
- /* iterate over event queues */
- for(eq=queue_list_first(); eq != NULL; eq=queue_list_next()) {
- if(!queue_list_is_log(eq)) {
- event_queue_cond_lock(eq);
- if(eq->flushing == 2) {
- eq->flushing = 0;
- num_replies++;
- il_log(LOG_DEBUG, " thread reply: %d\n", eq->flush_result);
- result = ((result == 1) || (eq->flush_result < 0)) ?
- eq->flush_result : result;
- }
- event_queue_cond_unlock(eq);
- }
- }
- }
- if(!bs_only) {
- eq = queue_list_get(NULL);
- if(eq == NULL) goto cmd_error;
- event_queue_cond_lock(eq);
- if(eq->flushing == 2) {
- eq->flushing = 0;
- num_replies++;
- result = ((result == 1) || (eq->flush_result < 0)) ?
- eq->flush_result : result;
- }
- event_queue_cond_unlock(eq);
- }
- }
-
- /* prevent deadlock in next flush */
- if(pthread_mutex_unlock(&flush_lock) < 0)
- abort();
-
-
- /* report back to local logger */
- switch(result) {
- case 1:
- result = 0; break;
- case 0:
- result = EDG_WLL_IL_EVENTS_WAITING; break;
- default:
- result = EDG_WLL_IL_SYS; break;
- }
- if(job_id_s) free(job_id_s);
- result = send_confirmation(receipt, result);
- if(result <= 0)
- il_log(LOG_ERR, "handle_cmd: error sending status: %s\n", error_get_msg());
- return(1);
-
-
-cmd_error:
- if(job_id_s) free(job_id_s);
- return(-1);
-#else
- return(0);
-#endif /* INTERLOGD_FLUSH */
-}
-#endif /* INTERLOGD_HANDLE_CMD */
-
-
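-/* handle_msg - convert a raw event into a server message, synchronize the
- * corresponding event store with the file offset and enqueue the message
- * on the appropriate output queue(s). */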
-static
-int
-handle_msg(il_octet_string_t *event, long offset)
-{
- struct server_msg *msg = NULL;
-#if !defined(IL_NOTIFICATIONS)
- struct event_queue *eq_l;
-#endif
- struct event_queue *eq_s;
- struct event_store *es;
-
- int ret;
-
- /* convert event to message for server */
- if((msg = server_msg_create(event, offset)) == NULL) {
- il_log(LOG_ERR, " handle_msg: error parsing event '%s':\n %s\n", event->data, error_get_msg());
- return(0);
- }
-
- /* sync event store with IPC (if necessary)
- * This MUST be called before inserting event into output queue! */
- if((es = event_store_find(msg->job_id_s)) == NULL)
- return(-1);
- msg->es = es;
-
- ret = event_store_sync(es, offset);
- il_log(LOG_DEBUG, " syncing event store at %d with event at %d, result %d\n", es->offset, offset, ret);
- if(ret < 0) {
- il_log(LOG_ERR, " handle_msg: error syncing event store:\n %s\n", error_get_msg());
- event_store_release(es);
- return(0);
- } else if(ret == 0) {
- /* we have seen this event already */
- server_msg_free(msg);
- event_store_release(es);
- return(1);
- }
-
- /* find appropriate queue for this event */
-#if defined(IL_NOTIFICATIONS)
- eq_s = queue_list_get(msg->dest);
-#else
- eq_s = queue_list_get(msg->job_id_s);
-#endif
- if(eq_s == NULL) {
- il_log(LOG_ERR, " handle_msg: appropriate queue not found: %s\n", error_get_msg());
- clear_error();
- } else {
- if(enqueue_msg(eq_s, msg) < 0)
- goto err;
- }
-
-#if !defined(IL_NOTIFICATIONS)
- eq_l = queue_list_get(NULL);
- if(!bs_only && eq_l != eq_s) {
- /* send to default queue (logging server) as well */
- if(enqueue_msg(eq_l, msg) < 0)
- goto err;
- }
-#endif
-
- /* if there was no error, set the next expected event offset */
- event_store_next(es, offset, msg->ev_len);
-
- /* allow cleanup thread to check on this event_store */
- event_store_release(es);
-
- /* free the message */
- server_msg_free(msg);
- return(1);
-
-err:
- event_store_release(es);
- server_msg_free(msg);
- return(-1);
-}
-
-
-
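-/* loop - main receive loop: read events from the input socket, dispatch
- * commands and regular events, and fall back to recovery from files on
- * protocol errors. */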
-int
-loop()
-{
- /* receive events */
- while(1) {
- il_octet_string_t msg;
- long offset;
- int ret;
-
- if(killflg)
- exit(0);
-
- clear_error();
- if((ret = input_queue_get(&msg, &offset, INPUT_TIMEOUT)) < 0)
- {
- if(error_get_maj() == IL_PROTO) {
- il_log(LOG_DEBUG, " premature EOF while receiving event\n");
- /* problems with socket input, try to catch up from files */
-#ifndef PERF_EMPTY
- event_store_recover_all();
-#endif
- continue;
- } else
- return(-1);
- }
- else if(ret == 0) {
- continue;
- }
-
-#ifdef PERF_EMPTY
- glite_wll_perftest_consumeEventString(msg.data);
- free(msg.data);
- continue;
-#endif
-
-#ifdef INTERLOGD_HANDLE_CMD
- ret = handle_cmd(&msg, offset);
- if(ret == 0)
-#endif
- ret = handle_msg(&msg, offset);
- free(msg.data);
- if(ret < 0)
- switch (error_get_maj()) {
- case IL_SYS:
- case IL_NOMEM:
- return (ret);
- break;
- default:
- il_log(LOG_ERR, "Error: %s\n", error_get_msg());
- break;
- }
- } /* while */
-}
+++ /dev/null
-#ident "$Header$"
-
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <sys/time.h>
-#include <unistd.h>
-#include <errno.h>
-#include <assert.h>
-#include <string.h>
-
-#include "interlogd.h"
-
-
-static const int SOCK_QUEUE_MAX = 50;
-extern char *socket_path;
-
-static int sock;
-static int accepted;
-
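-/* input_queue_attach - create the input UNIX socket; refuse to start if
- * another interlogger already listens on it and remove a stale socket left
- * behind by a dead instance. */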
-int
-input_queue_attach()
-{
- struct sockaddr_un saddr;
-
- if((sock=socket(PF_UNIX, SOCK_STREAM, 0)) < 0) {
- set_error(IL_SYS, errno, "input_queue_attach: error creating socket");
- return(-1);
- }
-
- memset(&saddr, 0, sizeof(saddr));
- saddr.sun_family = AF_UNIX;
- strcpy(saddr.sun_path, socket_path);
-
- /* test for the presence of the socket and another instance
- of interlogger listening */
- if(connect(sock, (struct sockaddr *)&saddr, sizeof(saddr.sun_path)) < 0) {
- if(errno == ECONNREFUSED) {
- /* socket present, but no one at the other end; remove it */
- il_log(LOG_WARNING, " removing stale input socket %s\n", socket_path);
- unlink(socket_path);
- }
- /* ignore other errors for now */
- } else {
- /* connection was successful, so bail out - there is
- another interlogger running */
- set_error(IL_SYS, EADDRINUSE, "input_queue_attach: another instance of interlogger is running");
- return(-1);
- }
-
- if(bind(sock, (struct sockaddr *)&saddr, sizeof(saddr)) < 0) {
- set_error(IL_SYS, errno, "input_queue_attach: error binding socket");
- return(-1);
- }
-
- if (listen(sock, SOCK_QUEUE_MAX)) {
- set_error(IL_SYS, errno, "input_queue_attach: error listening on socket");
- return -1;
- }
-
- return(0);
-}
-
-void input_queue_detach()
-{
- if (sock >= 0)
- close(sock);
- unlink(socket_path);
-}
-
-
-#define DEFAULT_CHUNK_SIZE 1024
-
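-/* read_event - read one event from the connected socket: the fixed-size
- * offset first, then the event string up to EVENT_SEPARATOR, growing the
- * buffer as needed; the separator itself is not stored. */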
-static
-int
-read_event(int sock, long *offset, il_octet_string_t *msg)
-{
- char *buffer, *p, *n;
- int len, alen, i, chunk_size = DEFAULT_CHUNK_SIZE;
- static char buf[1024];
-
- msg->data = NULL;
- msg->len = 0;
-
- /* receive offset */
- len = recv(sock, offset, sizeof(*offset), MSG_NOSIGNAL);
- if(len < (int)sizeof(*offset)) { /* signed compare, so that recv() errors (-1) are caught */
- set_error(IL_PROTO, errno, "read_event: error reading offset");
- return(-1);
- }
-
- /* receive event string */
- buffer=malloc(8*chunk_size);
- if(buffer == NULL) {
- set_error(IL_NOMEM, ENOMEM, "read_event: no room for event");
- return(-1);
- }
- p = buffer;
- alen = 8*chunk_size;
-
- /* Variables used here:
- - buffer points to allocated memory,
- - alen is the allocated memory size,
- - p points to the first free location in buffer,
- - len is the amount actually read by recv,
- - i is the amount of data belonging to the current event (including separator).
- - n points to event separator or is NULL
- Hence:
- (p - buffer) gives the amount of valid data read so far,
- (alen - (p - buffer)) is the free space,
- */
-
-#if 1
- /* Reading events - optimized version. Attempts to increase chunks read by recv
- * when there are more data, reads directly into destination memory (instead of
- * copying from static buffer) etc.
- *
- * For some reason it is not much faster than the old variant.
- */
- do {
- /* prepare at least chunk_size bytes for next data */
- if(alen - (p - buffer) < chunk_size) {
- alen += (chunk_size < 8192) ? 8192 : 8*chunk_size;
- n = realloc(buffer, alen);
- if(n == NULL) {
- free(buffer);
- set_error(IL_NOMEM, ENOMEM, "read_event: no room for event");
- return(-1);
- }
- p = n + (p - buffer);
- buffer = n;
- }
-
- /* read chunk */
- if((len=recv(sock, p, chunk_size, MSG_PEEK | MSG_NOSIGNAL)) > 0) {
- /* find the end of event, if any */
- /* faster (and dirty) way of doing strnchr (which is not in libc, anyway) */
- if((n=memccpy(p, p, EVENT_SEPARATOR, len)) != NULL) {
- i = n - p; /* length including separator */
- } else {
- i = len;
- /* long event, huh? try reading more data at once */
- chunk_size += 1024;
- }
- /* remove the relevant data from input */
- /* i > 0 */
- if(recv(sock, p, i, MSG_NOSIGNAL) != i) {
- set_error(IL_SYS, errno, "read_event: error reading data");
- free(buffer);
- return(-1);
- }
- /* move the pointer to the first free place, separator is considered free space */
- p = (n == NULL) ? p + len : n - 1;
- }
- } while ( (len > 0) && (n == NULL) );
-
-#else
- /* Reading events - original version.
- * Appears to behave quite well, anyway.
- */
- while((len=recv(sock, buf, sizeof(buf), MSG_PEEK | MSG_NOSIGNAL)) > 0) {
-
- /* we have to be prepared for sizeof(buf) bytes */
- if(alen - (p - buffer) < (int)sizeof(buf)) {
- alen += 8192;
- n = realloc(buffer, alen);
- if(n == NULL) {
- free(buffer);
- set_error(IL_NOMEM, ENOMEM, "read_event: no room for event");
- return(-1);
- }
- p = p - buffer + n;
- buffer = n;
- }
-
- /* copy all relevant bytes from buffer */
- n = (char*)memccpy(p, buf, EVENT_SEPARATOR, len);
- if(n) {
- /* separator found */
- n--; /* but do not preserve it */
- i = n - p;
- p = n;
- } else {
- /* separator not found */
- i = len;
- p += len;
- }
- /* This was definitely slowing us down:
- * for(i=0; (i < len) && (buf[i] != EVENT_SEPARATOR); i++)
- * *p++ = buf[i];
- */
-
- /* remove the data from queue */
- if(i > 0)
- if(recv(sock, buf, i, MSG_NOSIGNAL) != i) {
- set_error(IL_SYS, errno, "read_event: error reading data");
- free(buffer);
- return(-1);
- }
- if(i < len)
- /* the event is complete */
- break;
- }
-#endif
-
- /* terminate buffer */
- *p = 0;
-
- if(len < 0) {
- set_error(IL_SYS, errno, "read_event: error reading data");
- free(buffer);
- return(-1);
- }
-
- /* if len == 0, we have not encountered EVENT_SEPARATOR and thus the event is not complete */
- if(len == 0) {
- set_error(IL_PROTO, errno, "read_event: error reading data - premature EOF");
- free(buffer);
- return(-1);
- }
-
-#if 0
- /* this is probably not necessary at all:
- either len <=0, which was covered before,
- or 0 <= i < len => p > buffer;
- I would say this condition can not be satisfied.
- */
- if(p == buffer) {
- set_error(IL_PROTO, errno, "read_event: error reading data - no data received");
- free(buffer);
- return(-1);
- }
-#endif
-
- msg->data = buffer;
- msg->len = p - buffer;
- return(msg->len);
-}
-
-
-/*
- * Returns: -1 on error, 0 if no message available, message length otherwise
- *
- */
-#ifdef PERF_EVENTS_INLINE
-int
-input_queue_get(il_octet_string *buffer, long *offset, int timeout)
-{
- static long o = 0;
- int len;
-
- len = glite_wll_perftest_produceEventString(&buffer->data);
- buffer->len = len;
- if(len) {
- o += len;
- *offset = o;
- } else if (len == 0) {
- sleep(timeout);
- }
- return(len);
-}
-#else
-int
-input_queue_get(il_octet_string_t *buffer, long *offset, int timeout)
-{
- fd_set fds;
- struct timeval tv;
- int msg_len;
-
- assert(buffer != NULL);
-
- FD_ZERO(&fds);
- FD_SET(sock, &fds);
-
- tv.tv_sec = timeout;
- tv.tv_usec = 0;
-
- msg_len = select(sock + 1, &fds, NULL, NULL, timeout >= 0 ? &tv : NULL);
- switch(msg_len) {
-
- case 0: /* timeout */
- return(0);
-
- case -1: /* error */
- switch(errno) {
- case EINTR:
- il_log(LOG_DEBUG, " interrupted while waiting for event!\n");
- return(0);
-
- default:
- set_error(IL_SYS, errno, "input_queue_get: error waiting for event");
- return(-1);
- }
- default:
- break;
- }
-
- if((accepted=accept(sock, NULL, NULL)) < 0) {
- set_error(IL_SYS, errno, "input_queue_get: error accepting connection");
- return(-1);
- }
-
- read_event(accepted, offset, buffer);
- close(accepted);
-
- if(buffer->data == NULL) {
- if(error_get_maj() != IL_OK)
- return(-1);
- else
- return(0);
- }
-
- return(buffer->len);
-}
-#endif
+++ /dev/null
-#ident "$Header$"
-
-/*
- interlogger - collect events from local-logger and send them to logging and bookkeeping servers
-
-*/
-#include <getopt.h>
-#include <string.h>
-#include <signal.h>
-#include <pthread.h>
-
-#include <globus_common.h>
-
-#include "interlogd.h"
-#include "glite/lb/consumer.h"
-#include "glite/security/glite_gss.h"
-#ifdef LB_PERF
-#include "glite/lb/lb_perftest.h"
-#endif
-
-#define EXIT_FAILURE 1
-#if defined(IL_NOTIFICATIONS)
-#define DEFAULT_PREFIX "/tmp/notif_events"
-#define DEFAULT_SOCKET "/tmp/notif_interlogger.sock"
-#else
-#define DEFAULT_PREFIX "/tmp/dglogd.log"
-#define DEFAULT_SOCKET "/tmp/interlogger.sock"
-#endif
-
-
-/* The name the program was run with, stripped of any leading path. */
-char *program_name;
-int killflg = 0;
-
-int TIMEOUT = DEFAULT_TIMEOUT;
-
-gss_cred_id_t cred_handle = GSS_C_NO_CREDENTIAL;
-pthread_mutex_t cred_handle_lock = PTHREAD_MUTEX_INITIALIZER;
-
-time_t key_mtime = 0, cert_mtime = 0;
-
-static void usage (int status)
-{
- printf("%s - \n"
- " collect events from local-logger and send them to logging and bookkeeping servers\n"
- "Usage: %s [OPTION]... [FILE]...\n"
- "Options:\n"
- " -h, --help display this help and exit\n"
- " -V, --version output version information and exit\n"
- " -d, --debug do not run as daemon\n"
- " -v, --verbose print extensive debug output\n"
- " -f, --file-prefix <prefix> path and prefix for event files\n"
- " -c, --cert <file> location of server certificate\n"
- " -k, --key <file> location of server private key\n"
- " -C, --CAdir <dir> directory containing CA certificates\n"
- " -b, --book send events to bookkeeping server only\n"
- " -l, --log-server <host> specify address of log server\n"
- " -s, --socket <path> non-default path of local socket\n"
-#ifdef LB_PERF
- " -n, --nosend PERFTEST: consume events instead of sending\n"
-#ifdef PERF_EVENTS_INLINE
- " -e, --event_file <file> PERFTEST: file to read test events from\n"
- " -j, --njobs <n> PERFTEST: number of jobs to send\n"
-#endif
-#endif
- , program_name, program_name);
- exit(status);
-}
-
-
-/* Option flags and variables */
-static int debug;
-static int verbose = 0;
-char *file_prefix = DEFAULT_PREFIX;
-int bs_only = 0;
-#ifdef LB_PERF
-int nosend = 0;
-char *event_source = NULL;
-int njobs = 0;
-#endif
-
-char *cert_file = NULL;
-char *key_file = NULL;
-char *CAcert_dir = NULL;
-char *log_server = NULL;
-char *socket_path = DEFAULT_SOCKET;
-
-static struct option const long_options[] =
-{
- {"help", no_argument, 0, 'h'},
- {"version", no_argument, 0, 'V'},
- {"verbose", no_argument, 0, 'v'},
- {"debug", no_argument, 0, 'd'},
- {"file-prefix", required_argument, 0, 'f'},
- {"cert", required_argument, 0, 'c'},
- {"key", required_argument, 0, 'k'},
- {"book", no_argument, 0, 'b'},
- {"CAdir", required_argument, 0, 'C'},
- {"log-server", required_argument, 0, 'l'},
- {"socket", required_argument, 0, 's'},
-#ifdef LB_PERF
- {"nosend", no_argument, 0, 'n'},
-#ifdef PERF_EVENTS_INLINE
- {"event_file", required_argument, 0, 'e'},
- {"njobs", required_argument, NULL, 'j'},
-#endif
-#endif
- {NULL, 0, NULL, 0}
-};
-
-
-
-/* Set all the option flags according to the switches specified.
- Return the index of the first non-option argument. */
-static int
-decode_switches (int argc, char **argv)
-{
- int c;
-
- debug = 0;
-
- while ((c = getopt_long (argc, argv,
- "f:" /* file prefix */
- "h" /* help */
- "V" /* version */
- "v" /* verbose */
- "c:" /* certificate */
- "k:" /* key */
- "C:" /* CA dir */
- "b" /* only bookkeeping */
- "l:" /* log server */
- "d" /* debug */
-#ifdef LB_PERF
- "n" /* nosend */
-#ifdef PERF_EVENTS_INLINE
- "e:" /* event file */
- "j:" /* num jobs */
-#endif
-#endif
- "s:", /* socket */
- long_options, (int *) 0)) != EOF)
- {
- switch (c)
- {
- case 'V':
- printf ("interlogger %s\n", VERSION);
- exit (0);
-
- case 'v':
- verbose = 1;
- break;
-
- case 'h':
- usage (0);
-
- case 'd':
- debug = 1;
- break;
-
- case 'f':
- file_prefix = strdup(optarg);
- break;
-
- case 'c':
- cert_file = strdup(optarg);
- break;
-
- case 'k':
- key_file = strdup(optarg);
- break;
-
- case 'b':
- bs_only = 1;
- break;
-
- case 'l':
- log_server = strdup(optarg);
- break;
-
- case 'C':
- CAcert_dir = strdup(optarg);
- break;
-
- case 's':
- socket_path = strdup(optarg);
- break;
-
-#ifdef LB_PERF
- case 'n':
- nosend = 1;
- break;
-
-#ifdef PERF_EVENTS_INLINE
- case 'e':
- event_source = strdup(optarg);
- break;
-
- case 'j':
- njobs = atoi(optarg);
- break;
-#endif
-#endif
-
- default:
- usage (EXIT_FAILURE);
- }
- }
-
- return optind;
-}
-
-
-void handle_signal(int num) {
- il_log(LOG_DEBUG, "Received signal %d\n", num);
- killflg++;
-}
-
-int
-main (int argc, char **argv)
-{
- int i;
- char *p;
- edg_wll_GssStatus gss_stat;
- int ret;
-
- program_name = argv[0];
-
- setlinebuf(stdout);
- setlinebuf(stderr);
-
- i = decode_switches (argc, argv);
-
- if ((p = getenv("EDG_WL_INTERLOG_TIMEOUT"))) TIMEOUT = atoi(p);
-
- /* force -b if we do not have log server */
- if(log_server == NULL) {
- log_server = strdup(DEFAULT_LOG_SERVER);
- bs_only = 1;
- }
-
- if(init_errors(verbose ? LOG_DEBUG : LOG_WARNING)) {
- fprintf(stderr, "Failed to initialize error message subsys. Exiting.\n");
- exit(EXIT_FAILURE);
- }
-
- if (signal(SIGPIPE, handle_signal) == SIG_ERR
- || signal(SIGABRT, handle_signal) == SIG_ERR
- || signal(SIGTERM, handle_signal) == SIG_ERR
- || signal(SIGINT, handle_signal) == SIG_ERR) {
- perror("signal");
- exit(EXIT_FAILURE);
- }
-
-#ifdef LB_PERF
- /* this must be called after installing signal handlers */
- glite_wll_perftest_init(NULL, /* host */
- NULL, /* user */
- NULL, /* test name */
- event_source,
- njobs);
-#endif
-
- il_log(LOG_INFO, "Initializing input queue...\n");
- if(input_queue_attach() < 0) {
- il_log(LOG_CRIT, "Failed to initialize input queue: %s\n", error_get_msg());
- exit(EXIT_FAILURE);
- }
-
- /* initialize output queues */
- il_log(LOG_INFO, "Initializing event queues...\n");
- if(queue_list_init(log_server) < 0) {
- il_log(LOG_CRIT, "Failed to initialize output event queues: %s\n", error_get_msg());
- exit(EXIT_FAILURE);
- }
-
- if (CAcert_dir)
- setenv("X509_CERT_DIR", CAcert_dir, 1);
-
- edg_wll_gss_watch_creds(cert_file,&cert_mtime);
- ret = edg_wll_gss_acquire_cred_gsi(cert_file, key_file, &cred_handle, NULL, &gss_stat);
- if (ret) {
- char *gss_err = NULL;
- char *str;
-
- if (ret == EDG_WLL_GSS_ERROR_GSS)
- edg_wll_gss_get_error(&gss_stat, "edg_wll_gss_acquire_cred_gsi()", &gss_err);
- asprintf(&str, "Failed to load GSI credential: %s\n",
- (gss_err) ? gss_err : "edg_wll_gss_acquire_cred_gsi() failed");
- il_log(LOG_CRIT, str);
- free(str);
- if (gss_err)
- free(gss_err);
- exit(EXIT_FAILURE);
- }
-
- if(!debug &&
- (daemon(0,0) < 0)) {
- perror("daemon");
- exit(EXIT_FAILURE);
- }
-
- if (globus_module_activate(GLOBUS_COMMON_MODULE) != GLOBUS_SUCCESS) {
- il_log(LOG_CRIT, "Failed to initialize Globus common module\n");
- exit(EXIT_FAILURE);
- }
-
-#ifndef PERF_EMPTY
- /* find all unsent events waiting in files */
- {
- pthread_t rid;
-
- il_log(LOG_INFO, "Starting recovery thread...\n");
- if(pthread_create(&rid, NULL, recover_thread, NULL) < 0) {
- il_log(LOG_CRIT, "Failed to start recovery thread: %s\n", strerror(errno));
- exit(EXIT_FAILURE);
- }
- pthread_detach(rid);
- }
-#endif
-
- il_log(LOG_INFO, "Entering main loop...\n");
-
- /* do the work */
- if(loop() < 0) {
- il_log(LOG_CRIT, "Fatal error: %s\n", error_get_msg());
- if (killflg) {
- input_queue_detach();
- exit(EXIT_FAILURE);
- }
- }
- il_log(LOG_INFO, "Done!\n");
- input_queue_detach();
-
- exit (0);
-}
+++ /dev/null
-#ifndef INTERLOGGER_P_H
-#define INTERLOGGER_P_H
-
-#ident "$Header$"
-
-#include "il_error.h"
-#include "glite/security/glite_gss.h"
-#include "glite/lb/il_msg.h"
-
-#include <pthread.h>
-#include <sys/time.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#include <stdlib.h>
-#ifdef HAVE_DMALLOC_H
-#include <dmalloc.h>
-#endif
-
-#define INTERLOGD_HANDLE_CMD
-#define INTERLOGD_FLUSH
-#define INTERLOGD_EMS
-
-#define DEFAULT_USER "michal"
-#define DEFAULT_LOG_SERVER "localhost"
-#define DEFAULT_TIMEOUT 60
-
-#ifdef LB_PERF
-#include "glite/lb/lb_perftest.h"
-#endif
-
-#if defined(IL_NOTIFICATIONS)
-
-#include "glite/lb/notifid.h"
-
-#undef INTERLOGD_HANDLE_CMD
-#undef INTERLOGD_FLUSH
-#undef INTERLOGD_EMS
-#define IL_EVENT_ID_T edg_wll_NotifId
-#define IL_EVENT_GET_UNIQUE(a) edg_wll_NotifIdGetUnique((a))
-#define IL_EVENT_ID_FREE(a) edg_wll_NotifIdFree((a))
-#define IL_EVENT_ID_PARSE(a,b) edg_wll_NotifIdParse((a),(b))
-
-#else
-
-#define INTERLOGD_HANDLE_CMD
-#define INTERLOGD_FLUSH
-#define INTERLOGD_EMS
-#define IL_EVENT_ID_T edg_wlc_JobId
-#define IL_EVENT_GET_UNIQUE(a) edg_wlc_JobIdGetUnique((a))
-#define IL_EVENT_ID_FREE(a) edg_wlc_JobIdFree((a))
-#define IL_EVENT_ID_PARSE(a,b) edg_wlc_JobIdParse((a),(b))
-
-#endif
-
-
-#define EVENT_SEPARATOR '\n'
-
-// #define TIMEOUT 5
-extern int TIMEOUT;
-#define INPUT_TIMEOUT (60)
-
-
-extern gss_cred_id_t cred_handle;
-extern pthread_mutex_t cred_handle_lock;
-extern char *cert_file;
-extern char *key_file;
-extern char *CAcert_dir;
-extern int bs_only;
-extern int killflg;
-#ifdef LB_PERF
-extern int nosend;
-#ifdef PERF_EVENTS_INLINE
-extern char *event_source;
-#endif
-#endif
-
-/* shared data for thread communication */
-#ifdef INTERLOGD_FLUSH
-extern pthread_mutex_t flush_lock;
-extern pthread_cond_t flush_cond;
-#endif
-
-struct event_store {
- char *event_file_name; /* file with events from local logger */
- char *control_file_name; /* file with control information */
- char *job_id_s; /* string form of the job id */
- long last_committed_bs; /* offset behind event that was last committed by BS */
- long last_committed_ls; /* -"- LS */
- long offset; /* expected file position of next event */
- int recovering; /* flag for recovery mode */
- pthread_rwlock_t update_lock; /* lock to prevent simultaneous updates */
- pthread_rwlock_t use_lock; /* lock to prevent struct deallocation */
-#if defined(IL_NOTIFICATIONS)
- char *dest; /* host:port destination */
-#endif
-};
-
-
-struct server_msg {
- char *job_id_s; /* necessary for commit */
- long offset; /* just for printing more information to debug */
- char *msg;
- int len;
- int ev_len;
- struct event_store *es; /* cache for corresponding event store */
- long receipt_to; /* receiver (long local-logger id - LLLID) of delivery confirmation (for priority messages) */
-#if defined(IL_NOTIFICATIONS)
- char *dest_name;
- int dest_port;
- char *dest;
-#endif
-};
-
-
-struct event_queue {
- edg_wll_GssConnection gss; /* GSS connection */
- char *dest_name;
- int dest_port;
- int timeout; /* queue timeout */
- struct event_queue_msg *tail; /* last message in the queue */
- struct event_queue_msg *head; /* first message in the queue */
-#if defined(INTERLOGD_EMS)
- struct event_queue_msg *tail_ems; /* last priority message in the queue (or NULL) */
- struct event_queue_msg *mark_this; /* mark message for removal */
- struct event_queue_msg *mark_prev; /* predecessor of the marked message */
-#endif
- pthread_t thread_id; /* id of associated thread */
- pthread_rwlock_t update_lock; /* mutex for queue updates */
- pthread_mutex_t cond_lock; /* mutex for condition variable */
- pthread_cond_t ready_cond; /* condition variable for message arrival */
-#if defined(INTERLOGD_HANDLE_CMD) && defined(INTERLOGD_FLUSH)
- int flushing;
- int flush_result; /* result of flush operation */
- pthread_cond_t flush_cond; /* condition variable for flush operation */
-#endif
-};
-
-
-/* server msg methods */
-struct server_msg *server_msg_create(il_octet_string_t *, long);
-struct server_msg *server_msg_copy(struct server_msg *);
-int server_msg_init(struct server_msg *, il_octet_string_t *);
-#if defined(INTERLOGD_EMS)
-int server_msg_is_priority(struct server_msg *);
-#endif
-int server_msg_free(struct server_msg *);
-
-/* general event queue methods */
-struct event_queue *event_queue_create(char *);
-int event_queue_free(struct event_queue *);
-int event_queue_empty(struct event_queue *);
-int event_queue_insert(struct event_queue *, struct server_msg *);
-int event_queue_get(struct event_queue *, struct server_msg **);
-int event_queue_remove(struct event_queue *);
-int event_queue_enqueue(struct event_queue *, char *);
-/* helper */
-int enqueue_msg(struct event_queue *, struct server_msg *);
-
-/* protocol event queue methods */
-int event_queue_connect(struct event_queue *);
-int event_queue_send(struct event_queue *);
-int event_queue_close(struct event_queue *);
-int send_confirmation(long, int);
-
-/* thread event queue methods */
-int event_queue_create_thread(struct event_queue *);
-int event_queue_lock(struct event_queue *);
-int event_queue_unlock(struct event_queue *);
-int event_queue_lock_ro(struct event_queue *);
-int event_queue_signal(struct event_queue *);
-int event_queue_wait(struct event_queue *, int);
-int event_queue_sleep(struct event_queue *);
-int event_queue_wakeup(struct event_queue *);
-int event_queue_cond_lock(struct event_queue *);
-int event_queue_cond_unlock(struct event_queue *);
-
-/* input queue */
-int input_queue_attach();
-void input_queue_detach();
-int input_queue_get(il_octet_string_t *, long *, int);
-
-/* queue management functions */
-int queue_list_init(char *);
-struct event_queue *queue_list_get(char *);
-struct event_queue *queue_list_first();
-struct event_queue *queue_list_next();
-int queue_list_is_log(struct event_queue *);
-
-#if defined(IL_NOTIFICATIONS)
-struct event_queue *notifid_map_get_dest(const char *);
-int notifid_map_set_dest(const char *, struct event_queue *);
-int event_queue_move_events(struct event_queue *, struct event_queue *, char *);
-#endif
-
-/* event store functions */
-int event_store_init(char *);
-int event_store_cleanup();
-int event_store_recover_all(void);
-struct event_store *event_store_find(char *);
-int event_store_sync(struct event_store *, long);
-int event_store_next(struct event_store *, long, int);
-int event_store_commit(struct event_store *, int, int);
-int event_store_recover(struct event_store *);
-int event_store_release(struct event_store *);
-/* int event_store_remove(struct event_store *); */
-
-/* master main loop */
-int loop();
-
-/* recover thread */
-void *recover_thread(void*);
-
-#endif
+++ /dev/null
-#ident "$Header$"
-
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/wait.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <signal.h>
-#include <unistd.h>
-#include <string.h>
-#include <getopt.h>
-
-#include <globus_common.h>
-
-#include "glite/lb/context-int.h"
-#include "logd_proto.h"
-#include "glite/lb/consumer.h"
-#include "glite/security/glite_gss.h"
-#ifdef LB_PERF
-#include "glite/lb/lb_perftest.h"
-#endif
-
-static const char rcsid[] = "@(#)$Id$";
-static int verbose = 0;
-static int debug = 0;
-static int port = EDG_WLL_LOG_PORT_DEFAULT;
-static char *prefix = EDG_WLL_LOG_PREFIX_DEFAULT;
-static char *cert_file = NULL;
-static char *key_file = NULL;
-static char *CAcert_dir = NULL;
-static int noAuth = 0;
-static int noIPC = 0;
-static int noParse = 0;
-
-#define DEFAULT_SOCKET "/tmp/interlogger.sock"
-char *socket_path = DEFAULT_SOCKET;
-
-extern int confirm_sock;
-extern char confirm_sock_name[256];
-
-static struct option const long_options[] = {
- { "help", no_argument, 0, 'h' },
- { "version", no_argument, 0, 'V' },
- { "verbose", no_argument, 0, 'v' },
- { "debug", no_argument, 0, 'd' },
- { "port", required_argument, 0, 'p' },
- { "file-prefix", required_argument, 0, 'f' },
- { "cert", required_argument, 0, 'c' },
- { "key", required_argument, 0, 'k' },
- { "CAdir", required_argument, 0, 'C' },
- { "socket",required_argument, 0, 's' },
- { "noAuth", no_argument, 0, 'x' },
- { "noIPC", no_argument, 0, 'y' },
- { "noParse", no_argument, 0, 'z' },
- { NULL, 0, NULL, 0}
-};
-
-/*
- *----------------------------------------------------------------------
- *
- * usage - print usage
- *
- *----------------------------------------------------------------------
- */
-
-static void
-usage(char *program_name) {
- fprintf(stdout,"%s\n"
- "- collect events from logging API calls,\n"
- "- save them to files and\n"
- "- send them to inter-logger\n\n"
- "Usage: %s [option]\n"
- "-h, --help display this help and exit\n"
- "-V, --version output version information and exit\n"
- "-d, --debug do not run as daemon\n"
- "-v, --verbose print extensive debug output\n"
- "-p, --port <num> port to listen\n"
- "-f, --file-prefix <prefix> path and prefix for event files\n"
- "-c, --cert <file> location of server certificate\n"
- "-k, --key <file> location of server private key\n"
- "-C, --CAdir <dir> directory containing CA certificates\n"
- "-s, --socket <dir> socket to send messages (NOT IMPLEMENTED YET)\n"
- "--noAuth do not check caller's identity\n"
- "--noIPC do not send messages to inter-logger\n"
- "--noParse do not parse messages for correctness\n",
- program_name,program_name);
-}
-
-static sighandler_t mysignal(int num,sighandler_t handler)
-{
- struct sigaction sa,osa;
-
- memset(&sa,0,sizeof(sa));
- sa.sa_handler = handler;
- sa.sa_flags = SA_RESTART;
- return sigaction(num,&sa,&osa) ? SIG_ERR : osa.sa_handler;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * handle_signal -
- * USR1 - increase the verbosity of the program
- * USR2 - decrease the verbosity of the program
- *
- *----------------------------------------------------------------------
- */
-void handle_signal(int num) {
- if (num != SIGCHLD) edg_wll_ll_log(LOG_NOTICE,"Received signal %d\n", num);
- switch (num) {
- case SIGUSR1:
- if (edg_wll_ll_log_level < LOG_DEBUG) edg_wll_ll_log_level++;
- edg_wll_ll_log(LOG_NOTICE,"Logging level is now %d\n", edg_wll_ll_log_level);
- break;
- case SIGUSR2:
- if (edg_wll_ll_log_level > LOG_EMERG) edg_wll_ll_log_level--;
- edg_wll_ll_log(LOG_NOTICE,"Logging level is now %d\n", edg_wll_ll_log_level);
- break;
- case SIGPIPE:
- edg_wll_ll_log(LOG_NOTICE,"Broken pipe, lost communication channel.\n");
- break;
- case SIGCHLD:
- while (wait3(NULL,WNOHANG,NULL) > 0);
- break;
- case SIGINT:
- case SIGTERM:
- case SIGQUIT:
- if (confirm_sock) {
- edg_wll_ll_log(LOG_NOTICE,"Closing confirmation socket.\n");
- close(confirm_sock);
- unlink(confirm_sock_name);
- }
- exit(1);
- break;
- default: break;
- }
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * doit - authenticate the client over GSS and run the local-logger protocol on the connection
- *
- *----------------------------------------------------------------------
- */
-static int
-doit(int socket, gss_cred_id_t cred_handle, char *file_name_prefix, int noipc, int noparse)
-{
- char *subject;
- int ret;
- struct timeval timeout = {10,0};
- edg_wll_GssConnection con;
- edg_wll_GssStatus gss_stat;
- gss_buffer_desc gss_token = GSS_C_EMPTY_BUFFER;
- gss_name_t client_name = GSS_C_NO_NAME;
- OM_uint32 min_stat;
- gss_OID name_type = GSS_C_NO_OID;
-
- /* authentication */
- edg_wll_ll_log(LOG_INFO,"Processing authentication:\n");
-// FIXME - put some meaningful timeout value here and do something when it expires
- ret = edg_wll_gss_accept(cred_handle,socket,&timeout,&con, &gss_stat);
- if (ret) {
- edg_wll_ll_log(LOG_ERR,"edg_wll_gss_accept() failed\n");
- return(-1);
- }
-
- gss_stat.major_status = gss_inquire_context(&gss_stat.minor_status, con.context,
- &client_name, NULL, NULL, NULL, NULL,
- NULL, NULL);
- if (GSS_ERROR(gss_stat.major_status)) {
- char *gss_err;
- edg_wll_gss_get_error(&gss_stat, "Cannot read client identification", &gss_err);
- edg_wll_ll_log(LOG_WARNING, "%s\n", gss_err);
- free(gss_err);
- } else {
- gss_stat.major_status = gss_display_name(&gss_stat.minor_status, client_name,
- &gss_token, &name_type);
- if (GSS_ERROR(gss_stat.major_status)) {
- char *gss_err;
- edg_wll_gss_get_error(&gss_stat, "Cannot process client identification", &gss_err);
- edg_wll_ll_log(LOG_WARNING, "%s\n", gss_err);
- free(gss_err);
- }
- }
-
- if (GSS_ERROR(gss_stat.major_status) ||
- edg_wll_gss_oid_equal(name_type, GSS_C_NT_ANONYMOUS)) {
- edg_wll_ll_log(LOG_INFO," User not authenticated, setting as \"%s\". \n",EDG_WLL_LOG_USER_DEFAULT);
- subject=strdup(EDG_WLL_LOG_USER_DEFAULT);
- } else {
- edg_wll_ll_log(LOG_INFO," User successfully authenticated as:\n");
- edg_wll_ll_log(LOG_INFO, " %s\n", (char *)gss_token.value);
- subject=gss_token.value;
- memset(&gss_token.value, 0, sizeof(gss_token.value));
- }
-
- ret = edg_wll_log_proto_server(&con,subject,file_name_prefix,noipc,noparse);
-
- edg_wll_gss_close(&con, NULL);
- if (subject) free(subject);
- if (gss_token.length)
- gss_release_buffer(&min_stat, &gss_token);
- if (client_name != GSS_C_NO_NAME)
- gss_release_name(&min_stat, &client_name);
- return ret;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * Main -
- *
- *----------------------------------------------------------------------
- */
-int main(int argc, char *argv[])
-{
- int ret;
- int childpid;
- int opt;
-
- int listener_fd;
- int client_fd;
- struct sockaddr_in client_addr;
- int client_addr_len;
-
- char *my_subject_name = NULL;
-
- time_t cert_mtime = 0, key_mtime = 0;
- OM_uint32 min_stat;
- edg_wll_GssStatus gss_stat;
- gss_cred_id_t cred = GSS_C_NO_CREDENTIAL;
-
-
- setlinebuf(stdout);
- setlinebuf(stderr);
-
- /* welcome */
- fprintf(stdout,"\
-This is LocalLogger, part of Workload Management System in EU DataGrid.\n\
-Copyright (c) 2002 CERN, INFN and CESNET on behalf of the EU DataGrid.\n");
-
- /* get arguments */
- while ((opt = getopt_long(argc,argv,
- "h" /* help */
- "V" /* version */
- "v" /* verbose */
- "d" /* debug */
- "p:" /* port */
- "f:" /* file prefix */
- "c:" /* certificate */
- "k:" /* key */
- "C:" /* CA dir */
- "s:" /* socket */
- "x" /* noAuth */
- "y" /* noIPC */
- "z", /* noParse */
- long_options, (int *) 0)) != EOF) {
-
- switch (opt) {
- case 'V': fprintf(stdout,"%s:\t%s\n",argv[0],rcsid); exit(0);
- case 'v': verbose = 1; break;
- case 'd': debug = 1; break;
- case 'p': port = atoi(optarg); break;
- case 'f': prefix = optarg; break;
- case 'c': cert_file = optarg; break;
- case 'k': key_file = optarg; break;
- case 'C': CAcert_dir = optarg; break;
- case 's': socket_path = optarg; break;
- case 'x': noAuth = 1; break;
- case 'y': noIPC = 1; break;
- case 'z': noParse = 1; break;
- case 'h':
- default:
- usage(argv[0]); exit(0);
- }
- }
-#ifdef LB_PERF
- edg_wll_ll_log_init(verbose ? LOG_INFO : LOG_ERR);
-#else
- edg_wll_ll_log_init(verbose ? LOG_DEBUG : LOG_INFO);
-#endif
- edg_wll_ll_log(LOG_INFO,"Initializing...\n");
-
- /* check noParse */
- edg_wll_ll_log(LOG_INFO,"Parse messages for correctness...");
- if (noParse) {
- edg_wll_ll_log(LOG_INFO,"no.\n");
- } else {
- edg_wll_ll_log(LOG_INFO,"yes.\n");
- }
-
- /* check noIPC */
- edg_wll_ll_log(LOG_INFO,"Send messages also to inter-logger...");
- if (noIPC) {
- edg_wll_ll_log(LOG_INFO,"no.\n");
- } else {
- edg_wll_ll_log(LOG_INFO,"yes.\n");
- }
-
- /* check prefix correctness */
-/* XXX: probably also check write permissions */
- edg_wll_ll_log(LOG_INFO,"Store messages with the filename prefix \"%s\"...",prefix);
- if (strlen(prefix) > FILENAME_MAX - 34) {
- edg_wll_ll_log(LOG_INFO,"no.\n");
- edg_wll_ll_log(LOG_CRIT,"Too long prefix for file names, would not be able to write to log files. Exiting.\n");
- exit(1);
- } else {
- edg_wll_ll_log(LOG_INFO,"yes.\n");
- }
-
- if (CAcert_dir)
- setenv("X509_CERT_DIR", CAcert_dir, 1);
-
- /* initialize Globus common module */
- edg_wll_ll_log(LOG_INFO,"Initializing Globus common module...");
- if (globus_module_activate(GLOBUS_COMMON_MODULE) != GLOBUS_SUCCESS) {
- edg_wll_ll_log(LOG_NOTICE,"no.\n");
- edg_wll_ll_log(LOG_CRIT, "Failed to initialize Globus common module. Exiting.\n");
- exit(1);
- } else {
- edg_wll_ll_log(LOG_INFO,"yes.\n");
- }
-
- /* initialize signal handling */
- if (mysignal(SIGUSR1, handle_signal) == SIG_ERR) { perror("signal"); exit(1); }
- if (mysignal(SIGUSR2, handle_signal) == SIG_ERR) { perror("signal"); exit(1); }
- if (mysignal(SIGPIPE, handle_signal) == SIG_ERR) { perror("signal"); exit(1); }
- if (mysignal(SIGHUP, SIG_DFL) == SIG_ERR) { perror("signal"); exit(1); }
- if (mysignal(SIGINT, handle_signal) == SIG_ERR) { perror("signal"); exit(1); }
- if (mysignal(SIGQUIT, handle_signal) == SIG_ERR) { perror("signal"); exit(1); }
- if (mysignal(SIGTERM, handle_signal) == SIG_ERR) { perror("signal"); exit(1); }
- if (mysignal(SIGCHLD, handle_signal) == SIG_ERR) { perror("signal"); exit(1); }
-
-#ifdef LB_PERF
- glite_wll_perftest_init(NULL, NULL, NULL, NULL, 0);
-#endif
-
- edg_wll_gss_watch_creds(cert_file,&cert_mtime);
- /* XXX DK: support noAuth */
- ret = edg_wll_gss_acquire_cred_gsi(cert_file, key_file, &cred, &my_subject_name,
- &gss_stat);
- if (ret) {
- /* XXX DK: call edg_wll_gss_get_error() */
- edg_wll_ll_log(LOG_CRIT, "Failed to get GSI credentials. Exiting.\n");
- exit(1);
- }
-
- if (my_subject_name!=NULL) {
- edg_wll_ll_log(LOG_INFO," server running with certificate: %s\n",my_subject_name);
- free(my_subject_name);
- } else if (noAuth) {
- edg_wll_ll_log(LOG_INFO," running without certificate\n");
-#if 0
- /* XXX DK: */
- } else {
- edg_wll_ll_log(LOG_CRIT,"No server credential found. Exiting.\n");
- exit(1);
-#endif
- }
-
- /* do listen */
- edg_wll_ll_log(LOG_INFO,"Listening on port %d\n",port);
- listener_fd = do_listen(port);
- if (listener_fd == -1) {
- edg_wll_ll_log(LOG_CRIT,"Failed to listen on port %d\n",port);
- gss_release_cred(&min_stat, &cred);
- exit(-1);
- }
-
- client_addr_len = sizeof(client_addr);
- bzero((char *) &client_addr, client_addr_len);
-
- /* daemonize */
- edg_wll_ll_log(LOG_INFO,"Running as daemon...");
- if (debug) {
- edg_wll_ll_log(LOG_NOTICE,"no.\n");
- }
- else if (daemon(0,0) < 0) {
- edg_wll_ll_log(LOG_CRIT,"Failed to run as daemon. Exiting.\n");
- perror("daemon");
- exit(1);
- }
- else {
- edg_wll_ll_log(LOG_INFO,"yes.\n");
- }
-
- /*
- * Main loop
- */
- while (1) {
- edg_wll_ll_log(LOG_INFO,"Accepting incoming connections...\n");
- client_fd = accept(listener_fd, (struct sockaddr *) &client_addr,
- &client_addr_len);
- if (client_fd < 0) {
- close(listener_fd);
- edg_wll_ll_log(LOG_CRIT,"Failed to accept incoming connections\n");
- perror("accept");
- gss_release_cred(&min_stat, &cred);
- exit(-1);
- }
-
- switch (edg_wll_gss_watch_creds(cert_file,&cert_mtime)) {
- gss_cred_id_t newcred;
- case 0: break;
- case 1:
- ret = edg_wll_gss_acquire_cred_gsi(cert_file,key_file,&newcred,NULL,&gss_stat);
- if (ret) {
- edg_wll_ll_log(LOG_WARNING, "Reloading credentials failed, continue with older\n");
- } else {
- edg_wll_ll_log(LOG_INFO, "Reloading credentials\n");
- gss_release_cred(&min_stat, &cred);
- cred = newcred;
- }
- break;
- case -1:
- edg_wll_ll_log(LOG_WARNING, "edg_wll_gss_watch_creds failed\n");
- break;
- }
-
- /* FORK - change the "#if 1" below if fork() is not needed
- * (for debugging, for example)
- */
-#if 1
- if ((childpid = fork()) < 0) {
- perror("fork()");
- close(client_fd);
- }
- if (childpid == 0) {
- ret=doit(client_fd,cred,prefix,noIPC,noParse);
- close(client_fd);
- goto end;
- }
- if (childpid > 0) {
- close(client_fd);
- }
-#else
- ret=doit(client_fd,cred,prefix,noIPC,noParse);
- close(client_fd);
-#endif
- } /* while */
-
-end:
- close(listener_fd);
- gss_release_cred(&min_stat, &cred);
- exit(ret);
-}
+++ /dev/null
-#ident "$Header$"
-
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <unistd.h>
-#include <netinet/in.h>
-#include <signal.h>
-#include <string.h>
-#include <syslog.h>
-#include <fcntl.h>
-
-#include "glite/lb/context-int.h"
-#include "glite/lb/escape.h"
-#include "glite/lb/events_parse.h"
-
-#include "logd_proto.h"
-
-static const int one = 1;
-
-extern char* socket_path;
-
-int edg_wll_ll_log_level;
-
-#define tv_sub(a,b) {\
- (a).tv_usec -= (b).tv_usec;\
- (a).tv_sec -= (b).tv_sec;\
- if ((a).tv_usec < 0) {\
- (a).tv_sec--;\
- (a).tv_usec += 1000000;\
- }\
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * send_answer_back -
- *
- *----------------------------------------------------------------------
- */
-static int send_answer_back(edg_wll_GssConnection *con, int answer, struct timeval *timeout) {
- size_t count = 0;
- int err = 0;
- int ans = answer;
- u_int8_t ans_end[4];
- edg_wll_GssStatus gss_stat;
-
- edg_wll_ll_log(LOG_INFO,"Sending answer \"%d\" back to client...",answer);
- ans_end[0] = ans & 0xff; ans >>= 8;
- ans_end[1] = ans & 0xff; ans >>= 8;
- ans_end[2] = ans & 0xff; ans >>= 8;
- ans_end[3] = ans;
- if ((err = edg_wll_gss_write_full(con,ans_end,4,timeout,&count, &gss_stat)) < 0 ) {
- edg_wll_ll_log(LOG_INFO,"error.\n");
- return edg_wll_log_proto_server_failure(err,&gss_stat,"Error sending answer");
- } else {
- edg_wll_ll_log(LOG_INFO,"o.k.\n");
- return 0;
- }
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * wait_for_confirmation -
- *
- * Args: timeout - number of seconds to wait, 0 => wait indefinitely
- *
- * Returns: 1 => OK, *code contains error code sent by interlogger
- * 0 => timeout expired before anything interesting happened
- * -1 => some error (see errno for details)
- *
- *----------------------------------------------------------------------
- */
-int confirm_sock;
-char confirm_sock_name[256];
-
-static
-int init_confirmation()
-{
- struct sockaddr_un saddr;
-
- /* create socket */
- if((confirm_sock=socket(PF_UNIX, SOCK_STREAM, 0)) < 0) {
- edg_wll_ll_log(LOG_ERR,"init_confirmation(): error creating socket\n");
- SYSTEM_ERROR("socket");
- return(-1);
- }
-
- /* set the socket parameters */
- memset(&saddr, 0, sizeof(saddr));
- saddr.sun_family = AF_UNIX;
- strcpy(saddr.sun_path, confirm_sock_name);
-
- /* bind the socket */
- if(bind(confirm_sock, (struct sockaddr *)&saddr, sizeof(saddr.sun_path)) < 0) {
- edg_wll_ll_log(LOG_ERR,"init_confirmation(): error binding socket\n");
- SYSTEM_ERROR("bind");
- close(confirm_sock);
- unlink(confirm_sock_name);
- return(-1);
- }
-
- /* and listen */
- if(listen(confirm_sock, 5) < 0) {
- edg_wll_ll_log(LOG_ERR,"init_confirmation(): error listening on socket\n");
- SYSTEM_ERROR("listen");
- close(confirm_sock);
- unlink(confirm_sock_name);
- return(-1);
- }
-
- return(0);
-}
-
-
-int wait_for_confirmation(struct timeval *timeout, int *code)
-{
- fd_set fds;
- struct timeval to,before,after;
- int ret = 0, tmp = 0;
-
- *code = 0;
-
- FD_ZERO(&fds);
- FD_SET(confirm_sock, &fds);
-
- /* set timeout */
- if (timeout) {
- memcpy(&to,timeout,sizeof to);
- gettimeofday(&before,NULL);
- }
-
- /* wait for confirmation at most timeout seconds */
- if ((tmp=select(confirm_sock+1, &fds, NULL, NULL, timeout?&to:NULL)) < 0) {
- edg_wll_ll_log(LOG_ERR,"wait_for_confirmation(): error selecting socket\n");
- SYSTEM_ERROR("select");
- ret = -1;
- } else {
- if (tmp == 0)
- ret = 0;
- else {
- int nsd = accept(confirm_sock, NULL, NULL);
- ret = 1;
- if(nsd < 0) {
- edg_wll_ll_log(LOG_ERR,"wait_for_confirmation(): error accepting a connection on a socket\n");
- SYSTEM_ERROR("accept");
- ret = -1;
- } else {
- if(recv(nsd, code, sizeof(*code), MSG_NOSIGNAL) < 0) {
- edg_wll_ll_log(LOG_ERR,"wait_for_confirmation(): error receiving a message from a socket\n");
- SYSTEM_ERROR("recv");
- ret = -1;
- }
- close(nsd);
- }
- }
- }
- close(confirm_sock);
- unlink(confirm_sock_name);
- if (timeout) {
- gettimeofday(&after,NULL);
- tv_sub(after,before);
- tv_sub(*timeout,after);
- if (timeout->tv_sec < 0) {
- timeout->tv_sec = 0;
- timeout->tv_usec = 0;
- }
- }
- return ret;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * do_listen - listen on given port
- *
- * Returns: socket handle or -1 if something fails
- *
- * Calls: socket, bind, listen
- *
- * Algorithm:
- *
- *----------------------------------------------------------------------
- */
-int do_listen(int port)
-{
- int ret;
- int sock;
- struct sockaddr_in my_addr;
-
- memset(&my_addr, 0, sizeof(my_addr));
- my_addr.sin_family = AF_INET;
- my_addr.sin_addr.s_addr = INADDR_ANY;
- my_addr.sin_port = htons(port);
-
- sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- if (sock == -1) {
- edg_wll_ll_log(LOG_ERR,"do_listen(): error creating socket\n");
- SYSTEM_ERROR("socket");
- return -1;
- }
-
- setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
- ret = bind(sock, (struct sockaddr *)&my_addr, sizeof(my_addr));
- if (ret == -1) {
- edg_wll_ll_log(LOG_ERR,"do_listen(): error binding socket\n");
- SYSTEM_ERROR("bind");
- close(sock);
- return -1;
- }
-
- ret = listen(sock, 5);
- if (ret == -1) {
- edg_wll_ll_log(LOG_ERR,"do_listen(): error listening on socket\n");
- SYSTEM_ERROR("listen");
- close(sock);
- return -1;
- }
-
- return sock;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * edg_wll_log_proto_server - handle incoming data
- *
- * Returns: 0 if done properly or errno
- *
- * Calls:
- *
- * Algorithm:
- *
- *----------------------------------------------------------------------
- */
-int edg_wll_log_proto_server(edg_wll_GssConnection *con, char *name, char *prefix, int noipc, int noparse)
-{
- char *buf,*dglllid,*dguser,*jobId,*name_esc;
- char header[EDG_WLL_LOG_SOCKET_HEADER_LENGTH+1];
- char outfilename[FILENAME_MAX];
- size_t count;
- int count_total,size;
- u_int8_t size_end[4];
- size_t msg_size,dglllid_size,dguser_size;
- int i,answer,answer_sent;
- int msg_sock;
- char *msg,*msg_begin;
- FILE *outfile;
- int filedesc,filelock_status,flags;
- long filepos;
- struct flock filelock;
- int priority;
- long lllid;
- int unique;
- struct timeval timeout;
- int err;
- edg_wll_Context context;
- edg_wll_Event *event;
- edg_wll_GssStatus gss_stat;
-
- errno = i = answer = answer_sent = size = msg_size = dglllid_size = dguser_size = count = count_total = msg_sock = filedesc = filelock_status = /* priority */ unique = err = 0;
- buf = dglllid = dguser = jobId = name_esc = msg = msg_begin = NULL;
- event = NULL;
- if (EDG_WLL_LOG_TIMEOUT_MAX > EDG_WLL_LOG_SYNC_TIMEOUT_MAX) {
- timeout.tv_sec = EDG_WLL_LOG_TIMEOUT_MAX;
- } else {
- timeout.tv_sec = EDG_WLL_LOG_SYNC_TIMEOUT_MAX;
- }
- timeout.tv_usec = 0;
- if (edg_wll_InitContext(&context) != 0) {
- edg_wll_ll_log(LOG_ERR,"edg_wll_InitContex(): error.\n");
- answer = ENOMEM;
- goto edg_wll_log_proto_server_end;
- }
- if (edg_wll_ResetError(context) != 0) {
- edg_wll_ll_log(LOG_ERR,"edg_wll_ResetError(): error.\n");
- answer = ENOMEM;
- goto edg_wll_log_proto_server_end;
- }
-
- /* look for the unique unused long local-logger id (LLLID) */
- lllid = 1000*getpid();
- for (i=0; (i<1000)&&(!unique); i++) {
- lllid += i;
- snprintf(confirm_sock_name, sizeof(confirm_sock_name), "/tmp/dglogd_sock_%ld", lllid);
- if ((filedesc = open(confirm_sock_name,O_CREAT|O_EXCL,0600)) == -1) {
- if (errno == EEXIST) {
- edg_wll_ll_log(LOG_WARNING,"Warning: LLLID %ld already in use.\n",lllid);
- } else {
- SYSTEM_ERROR("open");
- }
- } else {
- unique = 1;
- close(filedesc); filedesc = 0;
- unlink(confirm_sock_name);
- }
- }
- if (!unique) {
- edg_wll_ll_log(LOG_ERR,"Cannot determine the unique long local-logger id (LLLID)!\n",lllid);
- return EAGAIN;
- }
- edg_wll_ll_log(LOG_INFO,"Long local-logger id (LLLID): %ld\n",lllid);
-
- /* receive socket header */
- edg_wll_ll_log(LOG_INFO,"Reading socket header...");
- memset(header, 0, EDG_WLL_LOG_SOCKET_HEADER_LENGTH+1);
- if ((err = edg_wll_gss_read_full(con, header, EDG_WLL_LOG_SOCKET_HEADER_LENGTH, &timeout, &count, &gss_stat)) < 0) {
- edg_wll_ll_log(LOG_INFO,"error.\n");
- answer = edg_wll_log_proto_server_failure(err,&gss_stat,"Error receiving header");
- goto edg_wll_log_proto_server_end;
- } else {
- edg_wll_ll_log(LOG_INFO,"o.k.\n");
- }
- edg_wll_ll_log(LOG_DEBUG,"Checking socket header...");
- header[EDG_WLL_LOG_SOCKET_HEADER_LENGTH] = '\0';
- if (strncmp(header,EDG_WLL_LOG_SOCKET_HEADER,EDG_WLL_LOG_SOCKET_HEADER_LENGTH)) {
- /* not the proper socket header text */
- edg_wll_ll_log(LOG_DEBUG,"error.\n");
- edg_wll_ll_log(LOG_ERR,"edg_wll_log_proto_server(): invalid socket header\n");
- edg_wll_ll_log(LOG_DEBUG,"edg_wll_log_proto_server(): read header '%s' instead of '%s'\n",
- header,EDG_WLL_LOG_SOCKET_HEADER);
- answer = EINVAL;
- goto edg_wll_log_proto_server_end;
- } else {
- edg_wll_ll_log(LOG_DEBUG,"o.k.\n");
- }
-
-/*
- edg_wll_ll_log(LOG_DEBUG,"Reading message priority...");
- count = 0;
- if ((err = edg_wll_gss_read_full(con, &priority, sizeof(priority), &timeout, &count, &gss_stat)) < 0) {
- edg_wll_ll_log(LOG_DEBUG,"error.\n");
- answer = edg_wll_log_proto_server_failure(err,&gss_stat,"Error receiving message priority");
- goto edg_wll_log_proto_server_end;
- } else {
- edg_wll_ll_log(LOG_DEBUG,"o.k.\n");
- }
-*/
-
- edg_wll_ll_log(LOG_DEBUG,"Reading message size...");
- count = 0;
- if ((err = edg_wll_gss_read_full(con, size_end, 4, &timeout, &count,&gss_stat)) < 0) {
- edg_wll_ll_log(LOG_DEBUG,"error.\n");
- answer = edg_wll_log_proto_server_failure(err,&gss_stat,"Error receiving message size");
- goto edg_wll_log_proto_server_end;
- } else {
- edg_wll_ll_log(LOG_DEBUG,"o.k.\n");
- }
- size = size_end[3]; size <<=8;
- size |= size_end[2]; size <<=8;
- size |= size_end[1]; size <<=8;
- size |= size_end[0];
- edg_wll_ll_log(LOG_DEBUG,"Checking message size...");
- if (size <= 0) {
- edg_wll_ll_log(LOG_DEBUG,"error.\n");
- /* probably wrong size in the header or nothing to read */
- edg_wll_ll_log(LOG_ERR,"edg_wll_log_proto_server(): invalid size read from socket header\n");
- edg_wll_ll_log(LOG_DEBUG,"Read size '%d'.\n",size);
- answer = EINVAL;
- goto edg_wll_log_proto_server_end;
- } else {
- edg_wll_ll_log(LOG_DEBUG,"o.k.\n");
- edg_wll_ll_log(LOG_DEBUG,"- Size read from header: %d bytes.\n",size);
- }
-
- /* format the DG.LLLID string */
- if (asprintf(&dglllid,"DG.LLLID=%ld ",lllid) == -1) {
- edg_wll_ll_log(LOG_ERR,"edg_wll_log_proto_server(): nomem for DG.LLLID\n");
- SYSTEM_ERROR("asprintf");
- answer = ENOMEM;
- goto edg_wll_log_proto_server_end;
- }
- dglllid_size = strlen(dglllid);
-
- /* format the DG.USER string */
- name_esc = edg_wll_LogEscape(name);
- if (asprintf(&dguser,"DG.USER=\"%s\" ",name_esc) == -1) {
- edg_wll_ll_log(LOG_ERR,"edg_wll_log_proto_server(): nomem for DG.USER\n");
- SYSTEM_ERROR("asprintf");
- answer = ENOMEM;
- goto edg_wll_log_proto_server_end;
- }
- dguser_size = strlen(dguser);
-
- /* allocate enough memory for all data */
- msg_size = dglllid_size + dguser_size + size + 1;
- if ((msg = malloc(msg_size)) == NULL) {
- edg_wll_ll_log(LOG_ERR,"edg_wll_log_proto_server(): out of memory for allocating message\n");
- SYSTEM_ERROR("malloc");
- answer = ENOMEM;
- goto edg_wll_log_proto_server_end;
- }
- strncpy(msg,dglllid,dglllid_size);
- msg_begin = msg + dglllid_size; // this is the "official" beginning of the message
- strncpy(msg_begin,dguser,dguser_size);
-
- /* receive message */
- edg_wll_ll_log(LOG_INFO,"Reading message from socket...");
- buf = msg_begin + dguser_size;
- count = 0;
- if ((err = edg_wll_gss_read_full(con, buf, size, &timeout, &count, &gss_stat)) < 0) {
- edg_wll_ll_log(LOG_INFO,"error.\n");
- answer = edg_wll_log_proto_server_failure(err,&gss_stat,"Error receiving message");
- goto edg_wll_log_proto_server_end;
- } else {
- edg_wll_ll_log(LOG_INFO,"o.k.\n");
- }
-
- if (buf[count] != '\0') buf[count] = '\0';
-
- /* parse message and get jobId and priority from it */
- if (!noparse && strstr(msg, "DG.TYPE=\"command\"") == NULL) {
- edg_wll_ll_log(LOG_INFO,"Parsing message for correctness...");
- if (edg_wll_ParseEvent(context,msg_begin,&event) != 0) {
- edg_wll_ll_log(LOG_INFO,"error.\n");
- edg_wll_ll_log(LOG_ERR,"edg_wll_log_proto_server(): edg_wll_ParseEvent error\n");
- edg_wll_ll_log(LOG_ERR,"edg_wll_ParseEvent(): %s\n",context->errDesc);
- answer = edg_wll_Error(context,NULL,NULL);
- goto edg_wll_log_proto_server_end;
- } else {
- edg_wll_ll_log(LOG_INFO,"o.k.\n");
- }
- edg_wll_ll_log(LOG_DEBUG,"Getting jobId from message...");
- jobId = edg_wlc_JobIdGetUnique(event->any.jobId);
- priority = event->any.priority;
- edg_wll_FreeEvent(event);
- event->any.priority = priority;
- edg_wll_ll_log(LOG_DEBUG,"o.k.\n");
- } else {
-/* FIXME: what if edg_wll_InitEvent fails? should be checked somehow -> nomem etc. */
- event = edg_wll_InitEvent(EDG_WLL_EVENT_UNDEF);
-/* XXX:
- event = calloc(1,sizeof(*event));
- if(event == NULL) {
- edg_wll_ll_log(LOG_ERR, "out of memory\n");
- answer = ENOMEM;
- goto edg_wll_log_proto_server_end;
- }
-*/
-
-/* XXX: obsolete, logd now doesn't need jobId for 'command' messages,
- * it will probably be needed for writing 'command' messages to some files
- edg_wll_ll_log(LOG_DEBUG,"Getting jobId from message...");
- jobId = edg_wll_GetJobId(msg);
- if (!jobId || edg_wlc_JobIdParse(jobId,&j)) {
- edg_wll_ll_log(LOG_DEBUG,"error.\n");
- edg_wll_ll_log(LOG_ERR,"ParseJobId(%s)\n",jobId?jobId:"NULL");
- answer = EINVAL;
- goto edg_wll_log_proto_server_end;
- } else {
- edg_wll_ll_log(LOG_DEBUG,"o.k.\n");
- }
- free(jobId);
- jobId = edg_wlc_JobIdGetUnique(j);
- edg_wlc_JobIdFree(j);
-*/
-
-/* FIXME: get the priority from message some better way */
- if (strstr(msg, "DG.PRIORITY=1") != NULL)
- event->any.priority = 1;
- else event->any.priority = 0;
- }
-
-
- /* if not command, save message to file */
-#ifdef LOGD_NOFILE
- edg_wll_ll_log(LOG_DEBUG,"Calling perftest\n");
- glite_wll_perftest_consumeEventString(msg);
- edg_wll_ll_log(LOG_DEBUG,"o.k.\n");
- filepos = 0;
-#else
- if(strstr(msg, "DG.TYPE=\"command\"") == NULL) {
- /* compose the name of the log file */
- edg_wll_ll_log(LOG_DEBUG,"Composing filename from prefix \"%s\" and jobId \"%s\"...",prefix,jobId);
- count = strlen(prefix);
- strncpy(outfilename,prefix,count); count_total=count;
- strncpy(outfilename+count_total,".",1); count_total+=1; count=strlen(jobId);
- strncpy(outfilename+count_total,jobId,count); count_total+=count;
- outfilename[count_total]='\0';
- edg_wll_ll_log(LOG_DEBUG,"o.k.\n");
- edg_wll_ll_log(LOG_INFO,"Writing message to \"%s\"...",outfilename);
-
- i = 0;
-open_event_file:
-
- /* fopen and properly handle the filelock */
- if ( edg_wll_log_event_write(context, outfilename, msg, FCNTL_ATTEMPTS, FCNTL_TIMEOUT, &filepos) ) {
- char *errd;
- answer = edg_wll_Error(context, NULL, &errd);
- edg_wll_ll_log(LOG_INFO,"error.\n");
- SYSTEM_ERROR(errd);
- free(errd);
- goto edg_wll_log_proto_server_end;
- } else edg_wll_ll_log(LOG_INFO,"o.k.");
- } else {
- filepos = 0;
- }
-#endif
-
-
- /* if this is not a priority message, send the answer back to the client now */
- if (!event->any.priority) {
- if (!send_answer_back(con,answer,&timeout)) {
- answer_sent = 1;
- }
- }
-
- /* send message via IPC (UNIX socket) */
- if (!noipc) {
- edg_wll_ll_log(LOG_DEBUG,
- "Sending via IPC (UNIX socket \"%s\")\n\t"
- "the message position %ld (%d bytes)...",
- outfilename, filepos, sizeof(filepos));
-
- if (event->any.priority) {
- edg_wll_ll_log(LOG_DEBUG,"- Initializing 2nd UNIX socket for priority messages confirmation...");
- if(init_confirmation() < 0) {
- edg_wll_ll_log(LOG_DEBUG,"error.\n");
- answer = errno;
- goto edg_wll_log_proto_server_end;
- } else {
- edg_wll_ll_log(LOG_DEBUG,"o.k.\n");
- }
- }
-
- if ( edg_wll_log_event_send(context, socket_path, filepos, msg, msg_size, CONNECT_ATTEMPTS, &timeout) ) {
- char *errd;
- answer = edg_wll_Error(context, NULL, &errd);
- edg_wll_ll_log(LOG_INFO,"error.\n");
- SYSTEM_ERROR(errd);
- free(errd);
- goto edg_wll_log_proto_server_end_1;
- } else edg_wll_ll_log(LOG_INFO,"o.k.");
-
- if (event->any.priority) {
- edg_wll_ll_log(LOG_INFO,"Waiting for confirmation...");
- if ((count = wait_for_confirmation(&timeout, &answer)) < 0) {
- edg_wll_ll_log(LOG_INFO,"error.\n");
- edg_wll_ll_log(LOG_ERR,"wait_for_confirmation(): error.\n");
- answer = errno;
- } else {
- edg_wll_ll_log(LOG_INFO,"o.k.\n");
- if (count == 0) {
- edg_wll_ll_log(LOG_DEBUG,"Waking up, timeout expired.\n");
- answer = EAGAIN;
- } else {
- edg_wll_ll_log(LOG_DEBUG,"Confirmation received, waking up.\n");
- }
- }
- }
- } else {
- edg_wll_ll_log(LOG_NOTICE,"Not sending via IPC.\n");
- }
-
-edg_wll_log_proto_server_end:
- /* if not sent already, send the answer back to client */
- if (!answer_sent) {
- answer = send_answer_back(con,answer,&timeout);
- }
- /* clean */
- edg_wll_FreeContext(context);
- if (name_esc) free(name_esc);
- if (dglllid) free(dglllid);
- if (dguser) free(dguser);
- if (jobId) free(jobId);
- if (msg) free(msg);
- if (event) free(event);
-
- edg_wll_ll_log(LOG_INFO,"Done.\n");
-
- return answer;
-
-edg_wll_log_proto_server_end_1:
- if (event->any.priority) {
- close(confirm_sock);
- unlink(confirm_sock_name);
- }
- goto edg_wll_log_proto_server_end;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * edg_wll_log_proto_server_failure - handle protocol failures on the server side
- *
- * Returns: errno
- *
- *----------------------------------------------------------------------
- */
-int edg_wll_log_proto_server_failure(int code, edg_wll_GssStatus *gss_code, const char *text)
-{
- const char *func = "edg_wll_log_proto_server()";
- int ret = 0;
-
- if(code>0) {
- return(0);
- }
- switch(code) {
- case EDG_WLL_GSS_ERROR_EOF:
- edg_wll_ll_log(LOG_ERR,"%s: %s, EOF occured\n", func, text);
- ret = EAGAIN;
- break;
- case EDG_WLL_GSS_ERROR_TIMEOUT:
- edg_wll_ll_log(LOG_ERR,"%s: %s, timeout expired\n", func, text);
- ret = EAGAIN;
- break;
- case EDG_WLL_GSS_ERROR_ERRNO:
- perror("edg_wll_gss_read()");
- edg_wll_ll_log(LOG_ERR,"%s: %s, system error occurred\n", func, text);
- ret = EAGAIN;
- break;
- case EDG_WLL_GSS_ERROR_GSS:
- {
- char *gss_err;
-
- edg_wll_gss_get_error(gss_code, "GSS error occured", &gss_err);
- edg_wll_ll_log(LOG_ERR,"%s: %s, %s\n", func, text, gss_err);
- free(gss_err);
- ret = EAGAIN;
- break;
- }
- default:
- edg_wll_ll_log(LOG_ERR,"%s: %s, unknown error occured\n");
- break;
- }
- return ret;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * edg_wll_ll_log_init - initialize the logging level
- *
- *----------------------------------------------------------------------
- */
-void edg_wll_ll_log_init(int level) {
- edg_wll_ll_log_level = level;
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * edg_wll_ll_log - print to stderr according to logging level
- * serious messages are also written to syslog
- *
- *----------------------------------------------------------------------
- */
-void edg_wll_ll_log(int level, const char *fmt, ...) {
- char *err_text;
- va_list fmt_args;
-
- va_start(fmt_args, fmt);
- vasprintf(&err_text, fmt, fmt_args);
- va_end(fmt_args);
-
- if(level <= edg_wll_ll_log_level)
- fprintf(stderr, "[%d] %s", (int) getpid(), err_text);
- if(level <= LOG_ERR) {
- openlog("edg-wl-logd", LOG_PID | LOG_CONS, LOG_DAEMON);
- syslog(level, "%s", err_text);
- closelog();
- }
-
- if(err_text) free(err_text);
-
- return;
-}
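For reference, the wire format handled above is simple: the client sends the fixed socket header (EDG_WLL_LOG_SOCKET_HEADER), then a 4-byte message size, then the message itself, and the server answers with a 4-byte result code from send_answer_back(). Both 4-byte integers are packed least-significant byte first, as the shift sequences above show. The following is a minimal, self-contained sketch of just that byte packing; the helper names pack_le32() and unpack_le32() are illustrative and do not appear in these sources.

#include <stdint.h>
#include <stdio.h>

/* Pack a 32-bit value least-significant byte first, mirroring the
 * shifts used for the answer in send_answer_back() and for the size
 * header read in edg_wll_log_proto_server(). */
static void pack_le32(uint8_t out[4], uint32_t v)
{
	out[0] = v & 0xff; v >>= 8;
	out[1] = v & 0xff; v >>= 8;
	out[2] = v & 0xff; v >>= 8;
	out[3] = v & 0xff;
}

/* Reassemble the value from the four bytes. */
static uint32_t unpack_le32(const uint8_t in[4])
{
	uint32_t v = in[3];
	v <<= 8; v |= in[2];
	v <<= 8; v |= in[1];
	v <<= 8; v |= in[0];
	return v;
}

int main(void)
{
	uint8_t buf[4];

	pack_le32(buf, 1234);
	printf("round trip: %u\n", (unsigned) unpack_le32(buf));	/* prints 1234 */
	return 0;
}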
+++ /dev/null
-#ifndef __EDG_WORKLOAD_LOGGING_LOCALLOGGER_LOGD_PROTO_H__
-#define __EDG_WORKLOAD_LOGGING_LOCALLOGGER_LOGD_PROTO_H__
-
-#ident "$Header$"
-
-/**
- * \file edg/workload/logging/locallogger/logd_proto.h
- * \brief server part of the logging protocol
- * \note private
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-#include <syslog.h>
-
-#include "glite/lb/log_proto.h"
-#include "glite/security/glite_gss.h"
-
-int edg_wll_log_proto_server(edg_wll_GssConnection *con, char *name, char *prefix, int noipc, int noparse);
-int edg_wll_log_proto_server_failure(int code, edg_wll_GssStatus *gss_code, const char *text);
-
-#define SYSTEM_ERROR(my_err) { \
- if (errno !=0 ) \
- edg_wll_ll_log(LOG_ERR,"%s: %s\n",my_err,strerror(errno)); \
- else \
- edg_wll_ll_log(LOG_ERR,"%s\n",my_err); }
-
-/* locallogger daemon error handling */
-
-extern int edg_wll_ll_log_level;
-void edg_wll_ll_log_init(int level);
-void edg_wll_ll_log(int level, const char *fmt, ...);
-
-
-/* fcntl defaults */
-
-#define FCNTL_ATTEMPTS 5
-#define FCNTL_TIMEOUT 1
-
-
-/* connect defaults */
-
-#define CONNECT_ATTEMPTS 5
-
-
-/* locallogger daemon listen and connect functions prototypes */
-
-int do_listen(int port);
-int do_connect(char *hostname, int port);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __EDG_WORKLOAD_LOGGING_LOCALLOGGER_LOGD_PROTO_H__ */
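A minimal usage sketch of the logging helpers declared here, assuming the program links against the logd_proto.c implementation above: edg_wll_ll_log_init() sets the threshold that edg_wll_ll_log() compares against, anything at LOG_ERR or more severe is additionally copied to syslog, and SYSTEM_ERROR() appends strerror(errno) when errno is set. The main() wrapper and the messages are illustrative only.

#include <errno.h>
#include <string.h>
#include <syslog.h>

#include "logd_proto.h"

int main(void)
{
	edg_wll_ll_log_init(LOG_DEBUG);		/* log everything to stderr */

	edg_wll_ll_log(LOG_INFO, "Starting up...\n");

	errno = EACCES;				/* pretend a syscall just failed */
	SYSTEM_ERROR("bind");			/* logs "bind: Permission denied" at LOG_ERR */

	return 0;
}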
+++ /dev/null
-#!/bin/bash
-
-numjobs=$1
-
-# XXX - there must be a better way to find stage
-STAGEDIR=/home/michal/shared/egee/jra1-head/stage
-. $STAGEDIR/sbin/perftest_common.sh
-
-DEBUG=${DEBUG:-0}
-# CONSUMER_ARGS=
-# PERFTEST_COMPONENT=
-# COMPONENT_ARGS=
-#LOGJOBS_ARGS=""
-
-check_test_files || exit 1
-
-PERFTEST_CONSUMER=$STAGEDIR/bin/glite-lb-interlogd-perf-empty
-CONSUMER_ARGS="-d -v"
-
-echo -e "\tsmall_job \t big_job \t small_dag \t big_dag"
-run_test il $numjobs
-j=0
-while [[ $j -lt 4 ]]
-do
- echo -e -n "\t ${PERFTEST_THROUGHPUT[$j]}"
- j=$((j+1))
-done
-echo ""
-#j=0
-#while [[ $j -lt 4 ]]
-#do
-# echo -e -n "\t (${PERFTEST_EV_THROUGHPUT[$j]})"
-# j=$((j+1))
-#done
-#echo ""
-
-
-#
-# dst=il
-#
-## i)1)
-#
-# glite_lb_interlogd_perf_noparse --nosend
-# run_test()
-#
-# glite_lb_interlogd_perf_nosync --nosend
-# run_test()
-#
-# glite_lb_interlogd_perf_norecover --nosend
-# run_test()
-#
-# glite_lb_interlogd_perf --nosend
-# run_test()
-#
-## ii)1)
-#
-#glite_lb_bkserverd_perf_empty
-#
-# glite_lb_interlogd_perf_noparse
-# run_test()
-#
-# glite_lb_interlogd_perf_nosync
-# run_test()
-#
-# glite_lb_interlogd_perf_norecover
-# run_test()
-#
-# glite_lb_interlogd_perf_lazy
-# run_test()
-#
-# glite_lb_interlogd_perf
-# run_test()
+++ /dev/null
-#!/bin/bash
-
-numjobs=$1
-
-# XXX - there must be a better way to find stage
-if [ -z "${GLITE_LOCATION}" ]; then
- STAGEDIR=/home/michal/shared/egee/jra1-head/stage
-else
- STAGEDIR=${GLITE_LOCATION}
-fi
-
-. $STAGEDIR/sbin/perftest_common.sh
-
-DEBUG=${DEBUG:-0}
-# CONSUMER_ARGS=
-# PERFTEST_COMPONENT=
-# COMPONENT_ARGS=
-#LOGJOBS_ARGS=""
-
-check_test_files || exit 1
-
-PERFTEST_CONSUMER=$STAGEDIR/bin/glite-lb-logd-perf-nofile
-CONSUMER_ARGS="-d -v --noIPC --noParse"
-
-echo -e "\tsmall_job \t big_job \t small_dag \t big_dag"
-run_test ll $numjobs
-j=0
-while [[ $j -lt 4 ]]
-do
- echo -e -n "\t ${PERFTEST_EVENT_THROUGHPUT[$j]}"
- j=$((j+1))
-done
-echo ""
-
+++ /dev/null
-#ident "$Header$"
-
-#include <string.h>
-#include <errno.h>
-#include <assert.h>
-
-#include "glite/lb/consumer.h"
-
-#include "interlogd.h"
-
-struct queue_list {
- struct event_queue *queue;
- char *dest;
- struct queue_list *next;
-};
-
-static struct event_queue *log_queue;
-static struct queue_list *queues;
-
-
-static
-int
-queue_list_create()
-{
- queues = NULL;
-
- return(0);
-}
-
-
-static
-int
-queue_list_find(struct queue_list *ql, const char *dest, struct queue_list **el, struct queue_list **prev)
-{
- struct queue_list *q, *p;
-
- assert(el != NULL);
-
- *el = NULL;
- if(prev)
- *prev = NULL;
-
- if(ql == NULL)
- return(0);
-
- q = NULL;
- p = ql;
-
- while(p) {
- if(strcmp(p->dest, dest) == 0) {
- *el = p;
- if(prev)
- *prev = q;
- return(1);
- }
-
- q = p;
- p = p->next;
- };
-
- return(0);
-}
-
-
-static
-int
-queue_list_add(struct queue_list **ql, const char *dest, struct event_queue *eq)
-{
- struct queue_list *el;
-
- assert(dest != NULL);
- assert(eq != NULL);
- assert(ql != NULL);
-
- el = malloc(sizeof(*el));
- if(el == NULL) {
- set_error(IL_NOMEM, ENOMEM, "queue_list_add: not enough room for new queue");
- return(-1);
- }
-
- el->dest = strdup(dest);
- if(el->dest == NULL) {
- free(el);
- set_error(IL_NOMEM, ENOMEM, "queue_list_add: not enough memory for new queue");
- return(-1);
- }
- el->queue = eq;
- el->next = queues;
- *ql = el;
- return 0;
-}
-
-
-/*
-static
-int
-queue_list_remove(struct queue_list *el, struct queue_list *prev)
-{
- assert(el != NULL);
-
- if(prev)
- prev->next = el->next;
- else
- queues = el->next;
-
- free(el);
- return(1);
-}
-*/
-
-
-#if !defined(IL_NOTIFICATIONS)
-static
-char *
-jobid2dest(edg_wlc_JobId jobid)
-{
- char *server_name,*out;
- unsigned int server_port;
-
- if (!jobid) {
- set_error(IL_PROTO, EDG_WLL_ERROR_PARSE_BROKEN_ULM, "jobid2dest: invalid job id");
- return(NULL);
- }
- edg_wlc_JobIdGetServerParts(jobid,&server_name,&server_port);
-
- asprintf(&out,"%s:%d",server_name,server_port);
- free(server_name);
- if(!out)
- set_error(IL_SYS, ENOMEM, "jobid2dest: error creating server name");
- return(out);
-}
-#endif
-
-struct event_queue *
-queue_list_get(char *job_id_s)
-{
- char *dest;
- struct queue_list *q;
- struct event_queue *eq;
-#if !defined(IL_NOTIFICATIONS)
- IL_EVENT_ID_T job_id;
-
- if(job_id_s == NULL || strcmp(job_id_s, "default") == 0)
- return(log_queue);
-
- if(edg_wlc_JobIdParse(job_id_s, &job_id)) {
- set_error(IL_LBAPI, EDG_WLL_ERROR_PARSE_BROKEN_ULM, "queue_list_get: invalid job id");
- return(NULL);
- }
-
- dest = jobid2dest(job_id);
- edg_wlc_JobIdFree(job_id);
-#else
- dest = job_id_s;
-#endif
-
- if(dest == NULL)
- return(NULL);
-
- if(queue_list_find(queues, dest, &q, NULL)) {
-#if !defined(IL_NOTIFICATIONS)
- free(dest);
-#endif
- return(q->queue);
- } else {
- eq = event_queue_create(dest);
- if(eq)
- queue_list_add(&queues, dest, eq);
-#if !defined(IL_NOTIFICATIONS)
- free(dest);
-#endif
- return(eq);
- }
-}
-
-
-int
-queue_list_is_log(struct event_queue *eq)
-{
- return(eq == queue_list_get(NULL));
-}
-
-
-int
-queue_list_init(char *ls)
-{
-#if !defined(IL_NOTIFICATIONS)
- /* create queue for log server */
- log_queue = event_queue_create(ls);
- if(log_queue == NULL)
- return(-1);
-#endif
-
- return(queue_list_create());
-}
-
-
-static struct queue_list *current;
-
-
-struct event_queue *
-queue_list_first()
-{
- current = queues;
- return(current ? current->queue : NULL);
-}
-
-
-struct event_queue *
-queue_list_next()
-{
- current = current ? current->next : NULL;
- return(current ? current->queue : NULL);
-}
-
-#if defined(IL_NOTIFICATIONS)
-
-static struct queue_list *notifid_map = NULL;
-
-struct event_queue *
-notifid_map_get_dest(const char * notif_id)
-{
- struct queue_list *q = NULL;
-
- queue_list_find(notifid_map, notif_id, &q, NULL);
- return(q ? q->queue : NULL);
-}
-
-
-/* returns 1 if mapping was changed, 0 if new one had to be created, -1 on error */
-int
-notifid_map_set_dest(const char *notif_id, struct event_queue *eq)
-{
- struct queue_list *q;
-
- if(queue_list_find(notifid_map, notif_id, &q, NULL)) {
- q->queue = eq;
- return(1);
- } else {
- return(queue_list_add(¬ifid_map, notif_id, eq));
- }
-}
-
-#endif
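Taken together, the functions above give the interlogger one event_queue per destination: queue_list_init() creates the queue for the log server, and queue_list_get() derives a "host:port" key from the job id (or, in the IL_NOTIFICATIONS build, uses the destination string directly) and creates a queue on first use. A hedged sketch of that flow from a caller's point of view, assuming the declarations in interlogd.h; a real caller would also enqueue the event, which is handled elsewhere in the interlogger sources.

#include "interlogd.h"

/* Illustrative only: pick (or create) the delivery queue for one job id
 * and make sure a delivery thread is running for it. */
int route_example(char *log_server, char *job_id_s)
{
	struct event_queue *eq;

	if (queue_list_init(log_server) < 0)	/* done once at startup */
		return -1;

	eq = queue_list_get(job_id_s);		/* per-destination queue, created on demand */
	if (eq == NULL)
		return -1;

	return event_queue_create_thread(eq);	/* 1 = new thread, 0 = already running */
}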
+++ /dev/null
-#ident "$Header$"
-
-#include <stdio.h>
-#include <assert.h>
-#include <errno.h>
-#include <signal.h>
-
-#include "interlogd.h"
-
-static
-void
-queue_thread_cleanup(void *q)
-{
- struct event_queue *eq = (struct event_queue *)q;
-
- il_log(LOG_WARNING, "thread %d exits\n", eq->thread_id);
-
- /* unlock all held locks */
- /* FIXME: check that the thread always exits when holding these locks;
- unlock them at appropriate places if this condition is not met
- event_queue_unlock(eq);
- event_queue_cond_unlock(eq);
- */
-
- /* clear thread id */
- eq->thread_id = 0;
-}
-
-
-static
-void *
-queue_thread(void *q)
-{
- struct event_queue *eq = (struct event_queue *)q;
- int ret, exit;
-
- if(init_errors(0) < 0) {
- il_log(LOG_ERR, "Error initializing thread specific data, exiting!");
- pthread_exit(NULL);
- }
-
- il_log(LOG_DEBUG, " started new thread for delivery to %s:%d\n", eq->dest_name, eq->dest_port);
-
- pthread_cleanup_push(queue_thread_cleanup, q);
-
- event_queue_cond_lock(eq);
-
- exit = 0;
- while(!exit) {
-
- clear_error();
-
- /* if there are no events, wait for them */
- ret = 0;
- while (event_queue_empty(eq)
-#if defined(INTERLOGD_HANDLE_CMD) && defined(INTERLOGD_FLUSH)
- && (eq->flushing != 1)
-#endif
- ) {
- ret = event_queue_wait(eq, 0);
- if(ret < 0) {
- /* error waiting */
- il_log(LOG_ERR, "queue_thread: %s\n", error_get_msg());
- event_queue_cond_unlock(eq);
- pthread_exit((void*)-1);
- }
- } /* END while(empty) */
-
- il_log(LOG_DEBUG, " attempting delivery to %s:%d\n", eq->dest_name, eq->dest_port);
-
- /* allow other threads to signal us, i.e. insert new events while
- * we are sending or request flush operation
- */
- event_queue_cond_unlock(eq);
-
- /* connect to server */
- if((ret=event_queue_connect(eq)) == 0) {
- /* not connected */
- if(error_get_maj() != IL_OK)
- il_log(LOG_ERR, "queue_thread: %s\n", error_get_msg());
-#if defined(IL_NOTIFICATIONS)
- il_log(LOG_INFO, " could not connect to client %s, waiting for retry\n", eq->dest_name);
-#else
- il_log(LOG_INFO, " could not connect to bookkeeping server %s, waiting for retry\n", eq->dest_name);
-#endif
- } else {
- /* connected, send events */
- switch(ret=event_queue_send(eq)) {
-
- case 0:
- /* there was an error and we still have events to send */
- if(error_get_maj() != IL_OK)
- il_log(LOG_ERR, "queue_thread: %s\n", error_get_msg());
- il_log(LOG_DEBUG, " events still waiting\n");
- break;
-
- case 1:
- /* hey, we are done for now */
- il_log(LOG_DEBUG, " all events for %s sent\n", eq->dest_name);
- break;
-
- default:
- /* internal error */
- il_log(LOG_ERR, "queue_thread: %s\n", error_get_msg());
- exit = 1;
- break;
-
- } /* switch */
-
- /* we are done for now anyway, so close the queue */
- event_queue_close(eq);
- }
-
-#if defined(INTERLOGD_HANDLE_CMD) && defined(INTERLOGD_FLUSH)
- if(pthread_mutex_lock(&flush_lock) < 0)
- abort();
- event_queue_cond_lock(eq);
-
- /* Check if we are flushing and if we are, report status to master */
- if(eq->flushing == 1) {
- il_log(LOG_DEBUG, " flushing mode detected, reporting status\n");
- /* 0 - events waiting, 1 - events sent, < 0 - some error */
- eq->flush_result = ret;
- eq->flushing = 2;
- if(pthread_cond_signal(&flush_cond) < 0)
- abort();
- }
- if(pthread_mutex_unlock(&flush_lock) < 0)
- abort();
-#else
- event_queue_cond_lock(eq);
-#endif
-
- /* if there was some error with server, sleep for a while */
- /* iff !event_queue_empty() */
- if(ret == 0)
- event_queue_sleep(eq);
-
- if(exit) {
- /* we have to clean up before exiting */
- event_queue_cond_unlock(eq);
- }
-
- } /* while */
-
- pthread_cleanup_pop(1);
-
- return(eq);
-}
-
-
-int
-event_queue_create_thread(struct event_queue *eq)
-{
- assert(eq != NULL);
-
- event_queue_lock(eq);
-
- /* if there is a thread already, just return */
- if(eq->thread_id > 0) {
- event_queue_unlock(eq);
- return(0);
- }
-
- /* create the thread itself */
- if(pthread_create(&eq->thread_id, NULL, queue_thread, eq) < 0) {
- eq->thread_id = 0;
- set_error(IL_SYS, errno, "event_queue_create_thread: error creating new thread");
- event_queue_unlock(eq);
- return(-1);
- }
-
- /* the thread is never going to be joined */
- pthread_detach(eq->thread_id);
-
- event_queue_unlock(eq);
-
- return(1);
-}
-
-
-
-int
-event_queue_lock(struct event_queue *eq)
-{
- assert(eq != NULL);
-
- if(pthread_rwlock_wrlock(&eq->update_lock)) {
- /*** abort instead, this is too serious
- set_error(IL_SYS, errno, "event_queue_lock: error acquiring write lock");
- return(-1);
- */
- abort();
- }
-
- return(0);
-}
-
-
-int
-event_queue_lock_ro(struct event_queue *eq)
-{
- assert(eq != NULL);
-
- if(pthread_rwlock_rdlock(&eq->update_lock)) {
- /*** abort instead, this is too serious
- set_error(IL_SYS, errno, "event_queue_lock_ro: error acquiring read lock");
- return(-1);
- */
- abort();
- }
-
- return(0);
-}
-
-
-int
-event_queue_unlock(struct event_queue *eq)
-{
- assert(eq != NULL);
-
- if(pthread_rwlock_unlock(&eq->update_lock)) {
- /*** abort instead, this is too serious
- set_error(IL_SYS, errno, "event_queue_unlock: error releasing lock");
- return(-1);
- */
- abort();
- }
-
- return(0);
-}
-
-
-int
-event_queue_signal(struct event_queue *eq)
-{
- assert(eq != NULL);
-
- if(pthread_cond_signal(&eq->ready_cond)) {
- /*** abort instead, this is too serious
- set_error(IL_SYS, errno, "event_queue_signal: error signaling queue thread");
- return(-1);
- */
- abort();
- }
- return(0);
-}
-
-
-int
-event_queue_wait(struct event_queue *eq, int timeout)
-{
- assert(eq != NULL);
-
- if(timeout) {
- struct timespec endtime;
- int ret = 0;
-
- endtime.tv_sec = time(NULL) + timeout;
- endtime.tv_nsec = 0;
-
- if((ret=pthread_cond_timedwait(&eq->ready_cond, &eq->cond_lock, &endtime))) {
- if(ret == ETIMEDOUT)
- return(1);
- /*** abort instead, this is too serious
- set_error(IL_SYS, errno, "event_queue_wait: error waiting on condition variable");
- return(-1);
- */
- abort();
- }
- } else {
- if(pthread_cond_wait(&eq->ready_cond, &eq->cond_lock)) {
- /*** abort instead, this is too serious
- set_error(IL_SYS, errno, "event_queue_wait: error waiting on condition variable");
- return(-1);
- */
- abort();
- }
- }
- return(0);
-}
-
-
-int event_queue_sleep(struct event_queue *eq)
-{
-#if defined(INTERLOGD_HANDLE_CMD) && defined(INTERLOGD_FLUSH)
- struct timespec ts;
- struct timeval tv;
- int ret;
-
- assert(eq != NULL);
-
- gettimeofday(&tv, NULL);
- ts.tv_sec = tv.tv_sec + eq->timeout;
- ts.tv_nsec = 1000 * tv.tv_usec;
- if((ret=pthread_cond_timedwait(&eq->flush_cond, &eq->cond_lock, &ts)) < 0) {
- if(ret != ETIMEDOUT) {
- /*** abort instead, this is too serious
- set_error(IL_SYS, errno, "event_queue_sleep: error waiting on condition");
- return(-1);
- */
- abort();
- }
- }
-#else
- sleep(eq->timeout);
-#endif
- return(0);
-}
-
-
-#if defined(INTERLOGD_HANDLE_CMD)
-int event_queue_wakeup(struct event_queue *eq)
-{
- assert(eq != NULL);
-
- if(pthread_cond_signal(&eq->ready_cond)) {
- /**
- set_error(IL_SYS, errno, "event_queue_wakeup: error signaling queue thread");
- return(-1);
- */
- abort();
- }
-#if defined(INTERLOGD_FLUSH)
- if(pthread_cond_signal(&eq->flush_cond)) {
- /**
- set_error(IL_SYS, errno, "event_queue_wakeup: error signaling queue thread");
- return(-1);
- */
- abort();
- }
-#endif
- return(0);
-}
-#endif
-
-int event_queue_cond_lock(struct event_queue *eq)
-{
- assert(eq != NULL);
-
- if(pthread_mutex_lock(&eq->cond_lock)) {
- /**
- set_error(IL_SYS, errno, "event_queue_cond_lock: error locking condition mutex");
- return(-1);
- */
- abort();
- }
-
- return(0);
-}
-
-
-int event_queue_cond_unlock(struct event_queue *eq)
-{
- assert(eq != NULL);
-
- if(pthread_mutex_unlock(&eq->cond_lock)) {
- /**
- set_error(IL_SYS, errno, "event_queue_cond_unlock: error locking condition mutex");
- return(-1);
- */
- abort();
- }
-
- return(0);
-}
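The delivery thread above blocks in event_queue_wait() whenever its queue is empty, so a producer has to take the condition mutex, insert, and then signal the thread. A minimal sketch of that producer side using only the primitives defined in this file; event_queue_insert() is assumed to be provided by the event queue implementation elsewhere in these sources.

#include "interlogd.h"

/* Illustrative producer: hand one message to a delivery queue and wake
 * the thread running queue_thread() for it. */
int enqueue_and_wake(struct event_queue *eq, struct server_msg *msg)
{
	int ret;

	event_queue_cond_lock(eq);		/* same mutex queue_thread() waits on */
	ret = event_queue_insert(eq, msg);
	if (ret == 0)
		event_queue_signal(eq);		/* wakes event_queue_wait() */
	event_queue_cond_unlock(eq);

	return ret;
}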
+++ /dev/null
-#ident "$Header$"
-
-#include <stdio.h>
-#include <assert.h>
-#include <errno.h>
-
-#include "interlogd.h"
-
-extern char *file_prefix;
-
-extern time_t cert_mtime, key_mtime;
-
-void *
-recover_thread(void *q)
-{
- if(init_errors(0) < 0) {
- il_log(LOG_ERR, "Error initializing thread specific data, exiting!");
- pthread_exit(NULL);
- }
-
- while(1) {
- il_log(LOG_INFO, "Looking up event files...\n");
- if(event_store_init(file_prefix) < 0) {
- il_log(LOG_ERR, "recover_thread: %s\n", error_get_msg());
- exit(1);
- }
- if(event_store_recover_all() < 0) {
- il_log(LOG_ERR, "recover_thread: %s\n", error_get_msg());
- exit(1);
- }
- if(event_store_cleanup() < 0) {
- il_log(LOG_ERR, "recover_thread: %s\n", error_get_msg());
- exit(1);
- }
- il_log(LOG_INFO, "Checking for new certificate...\n");
- if(pthread_mutex_lock(&cred_handle_lock) < 0)
- abort();
- if (edg_wll_gss_watch_creds(cert_file, &cert_mtime) > 0) {
- gss_cred_id_t new_cred_handle = GSS_C_NO_CREDENTIAL;
- OM_uint32 min_stat;
- int ret;
-
- ret = edg_wll_gss_acquire_cred_gsi(cert_file,key_file,
- &new_cred_handle, NULL, NULL);
- if (new_cred_handle != GSS_C_NO_CREDENTIAL) {
- gss_release_cred(&min_stat, &cred_handle);
- cred_handle = new_cred_handle;
- il_log(LOG_INFO, "New certificate found and deployed.\n");
- }
- }
- if(pthread_mutex_unlock(&cred_handle_lock) < 0)
- abort();
- sleep(INPUT_TIMEOUT);
- }
-}
+++ /dev/null
-#ident "$Header$"
-
-#include <assert.h>
-#include <errno.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#include <fcntl.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-
-
-/*
- * - L/B server protocol handling routines
- */
-
-#include "glite/wmsutils/jobid/cjobid.h"
-#include "glite/lb/il_string.h"
-#include "glite/lb/context.h"
-
-#include "interlogd.h"
-
-#if defined(INTERLOGD_EMS) || (defined(INTERLOGD_HANDLE_CMD) && defined(INTERLOGD_FLUSH))
-/*
- * Send confirmation to client.
- *
- */
-int
-send_confirmation(long lllid, int code)
-{
- struct sockaddr_un saddr;
- char sname[256];
- int sock, ret;
-
- if((sock=socket(PF_UNIX, SOCK_STREAM, 0)) < 0) {
- set_error(IL_SYS, errno, "send_confirmation: error creating socket");
- return(-1);
- }
-
- if(fcntl(sock, F_SETFL, O_NONBLOCK) < 0) {
- set_error(IL_SYS, errno, "send_confirmation: error setting socket options");
- return(-1);
- }
-
- ret = 0;
- memset(&saddr, 0, sizeof(saddr));
- saddr.sun_family = AF_UNIX;
- snprintf(sname, sizeof(sname), "/tmp/dglogd_sock_%ld", lllid);
- strcpy(saddr.sun_path, sname);
- if(connect(sock, (struct sockaddr *)&saddr, sizeof(saddr.sun_path)) < 0) {
- set_error(IL_SYS, errno, "send_confirmation: error connecting socket");
- goto out;
- }
-
- if(send(sock, &code, sizeof(code), MSG_NOSIGNAL) < 0) {
- set_error(IL_SYS, errno, "send_confirmation: error sending data");
- goto out;
- }
- ret = 1;
-
- il_log(LOG_DEBUG, " sent code %d back to client\n", code);
-
- out:
- close(sock);
- return(ret);
-}
-
-
-static
-int
-confirm_msg(struct server_msg *msg, int code, int code_min)
-{
- switch(code) {
- case LB_OK:
- code_min = 0;
- break;
- case LB_DBERR:
- /* code_min already contains the appropriate error code */
- break;
- case LB_PROTO:
- code_min = EDG_WLL_IL_PROTO;
- break;
- default:
- code_min = EDG_WLL_IL_SYS;
- break;
- }
-
- return(send_confirmation(msg->receipt_to, code_min));
-}
-#endif
-
-
-
-struct reader_data {
- edg_wll_GssConnection *gss;
- struct timeval *timeout;
-};
-
-
-static
-int
-gss_reader(void *user_data, char *buffer, int max_len)
-{
- int ret, len;
- struct reader_data *data = (struct reader_data *)user_data;
- edg_wll_GssStatus gss_stat;
-
- ret = edg_wll_gss_read_full(data->gss, buffer, max_len, data->timeout, &len, &gss_stat);
- if(ret < 0) {
- char *gss_err = NULL;
-
- if(ret == EDG_WLL_GSS_ERROR_GSS) {
- edg_wll_gss_get_error(&gss_stat, "get_reply", &gss_err);
- set_error(IL_DGGSS, ret, gss_err);
- free(gss_err);
- } else
- set_error(IL_DGGSS, ret, "get_reply");
- }
- return(ret);
-}
-
-
-/*
- * Read reply from server.
- * Returns: -1 - error reading message,
- * code > 0 - error code from server
- */
-static
-int
-get_reply(struct event_queue *eq, char **buf, int *code_min)
-{
- char *msg=NULL;
- int ret, code;
- int len, l;
- struct timeval tv;
- struct reader_data data;
-
- tv.tv_sec = TIMEOUT;
- tv.tv_usec = 0;
- data.gss = &eq->gss;
- data.timeout = &tv;
- len = read_il_data(&data, &msg, gss_reader);
- if(len < 0) {
- set_error(IL_PROTO, LB_PROTO, "get_reply: error reading server reply");
- return(-1);
- }
- ret = decode_il_reply(&code, code_min, buf, msg);
- if(msg) free(msg);
- if(ret < 0) {
- set_error(IL_PROTO, LB_PROTO, "get_reply: error decoding server reply");
- return(-1);
- }
- return(code);
-}
-
-
-
-/*
- * Returns: 0 - not connected, timeout set, 1 - OK
- */
-int
-event_queue_connect(struct event_queue *eq)
-{
- int ret;
- struct timeval tv;
- edg_wll_GssStatus gss_stat;
-
- assert(eq != NULL);
-
-#ifdef LB_PERF
- if(!nosend) {
-#endif
-
- if(eq->gss.context == GSS_C_NO_CONTEXT) {
-
- tv.tv_sec = TIMEOUT;
- tv.tv_usec = 0;
- if(pthread_mutex_lock(&cred_handle_lock) < 0)
- abort();
- il_log(LOG_DEBUG, " trying to connect to %s:%d\n", eq->dest_name, eq->dest_port);
- ret = edg_wll_gss_connect(cred_handle, eq->dest_name, eq->dest_port, &tv, &eq->gss, &gss_stat);
- if(pthread_mutex_unlock(&cred_handle_lock) < 0)
- abort();
- if(ret < 0) {
- char *gss_err = NULL;
-
- if (ret == EDG_WLL_GSS_ERROR_GSS)
- edg_wll_gss_get_error(&gss_stat, "event_queue_connect: edg_wll_gss_connect", &gss_err);
- set_error(IL_DGGSS, ret,
- (ret == EDG_WLL_GSS_ERROR_GSS) ? gss_err : "event_queue_connect: edg_wll_gss_connect");
- if (gss_err) free(gss_err);
- eq->gss.context = GSS_C_NO_CONTEXT;
- eq->timeout = TIMEOUT;
- return(0);
- }
- }
-
-#ifdef LB_PERF
- }
-#endif
-
- return(1);
-}
-
-
-int
-event_queue_close(struct event_queue *eq)
-{
- assert(eq != NULL);
-
-#ifdef LB_PERF
- if(!nosend) {
-#endif
-
- if(eq->gss.context != GSS_C_NO_CONTEXT) {
- edg_wll_gss_close(&eq->gss, NULL);
- eq->gss.context = GSS_C_NO_CONTEXT;
- }
-#ifdef LB_PERF
- }
-#endif
- return(0);
-}
-
-
-/*
- * Send all events from the queue.
- * Returns: -1 - system error, 0 - not sent, 1 - queue empty
- */
-int
-event_queue_send(struct event_queue *eq)
-{
- assert(eq != NULL);
-
-#ifdef LB_PERF
- if(!nosend) {
-#endif
- if(eq->gss.context == GSS_C_NO_CONTEXT)
- return(0);
-#ifdef LB_PERF
- }
-#endif
-
- /* feed the server with events */
- while (!event_queue_empty(eq)) {
- struct server_msg *msg;
- char *rep = NULL;
- int ret, code, code_min;
- size_t bytes_sent;
- struct timeval tv;
- edg_wll_GssStatus gss_stat;
-
- clear_error();
-
- if(event_queue_get(eq, &msg) < 0)
- return(-1);
-
- il_log(LOG_DEBUG, " trying to deliver event at offset %d for job %s\n", msg->offset, msg->job_id_s);
-
-#ifdef LB_PERF
- if(!nosend) {
-#endif
- tv.tv_sec = TIMEOUT;
- tv.tv_usec = 0;
- ret = edg_wll_gss_write_full(&eq->gss, msg->msg, msg->len, &tv, &bytes_sent, &gss_stat);
- if(ret < 0) {
- eq->timeout = TIMEOUT;
- return(0);
- }
-
- if((code = get_reply(eq, &rep, &code_min)) < 0) {
- /* could not get the reply properly, so try again later */
- il_log(LOG_ERR, " error reading server %s reply:\n %s\n", eq->dest_name, error_get_msg());
- eq->timeout = TIMEOUT;
- return(0);
- }
-#ifdef LB_PERF
- } else {
- glite_wll_perftest_consumeEventIlMsg(msg->msg+17);
- code = LB_OK;
- }
-#endif
-
- il_log(LOG_DEBUG, " event sent, server %s replied with %d, %s\n", eq->dest_name, code, rep);
- free(rep);
-
- /* the reply is back here */
- switch(code) {
-
- /* NOT USED: case LB_TIME: */
- case LB_NOMEM:
- /* NOT USED: case LB_SYS: */
- /* NOT USED: case LB_AUTH: */
- /* non fatal errors (for us) */
- eq->timeout = TIMEOUT;
- return(0);
-
- case LB_OK:
- /* event successfully delivered, fall through */
-
- default: /* LB_DBERR, LB_PROTO */
- /* either delivered or permanently refused by the server; */
- /* in both cases update the event pointer */
- if(event_store_commit(msg->es, msg->ev_len, queue_list_is_log(eq)) < 0)
- /* failure committing message, this is bad */
- return(-1);
- /* if we have just delivered a priority message from the queue, send confirmation */
- ret = 1;
-#if defined(INTERLOGD_EMS)
- if(server_msg_is_priority(msg) &&
- ((ret=confirm_msg(msg, code, code_min)) < 0))
- return(ret);
-#endif
-
- if((ret == 0) &&
- (error_get_maj() != IL_OK))
- il_log(LOG_ERR, "send_event: %s\n", error_get_msg());
-
- event_queue_remove(eq);
- break;
-
- } /* switch */
- } /* while */
-
- return(1);
-
-} /* send_events */
-
-
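As the comments above event_queue_connect() and event_queue_send() state, a caller only needs their coarse results: connect returns 0 when the destination is unreachable (with eq->timeout already set for the retry), and send returns 1 when the queue was drained, 0 when delivery should be retried later, and -1 on an internal error. A short sketch of one delivery attempt built on that contract; it condenses the switch in queue_thread() above.

#include "interlogd.h"

/* Illustrative: one delivery attempt against the queue's destination. */
int deliver_once(struct event_queue *eq)
{
	int ret;

	if (event_queue_connect(eq) == 0)
		return 0;			/* not connected, retry after eq->timeout */

	ret = event_queue_send(eq);		/* -1 error, 0 retry later, 1 queue drained */
	event_queue_close(eq);
	return ret;
}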
+++ /dev/null
-#ident "$Header$"
-
-#include <errno.h>
-#include <assert.h>
-#include <string.h>
-
-#include "interlogd.h"
-#include "glite/lb/il_msg.h"
-#include "glite/lb/events_parse.h"
-#include "glite/lb/consumer.h"
-#include "glite/lb/context.h"
-
-static
-int
-create_msg(il_octet_string_t *ev, char **buffer, long *receipt)
-{
- char *p; int len;
- char *event = ev->data;
-
- *receipt = 0;
-
-#if defined(INTERLOGD_EMS)
- /* find DG.LLLID */
- if(strncmp(event, "DG.LLLID",8) == 0 ||
- strncmp(event, "DG.LLPID",8) == 0) { /* 8 == strlen("DG.LLLID") */
-
- /* skip the key */
- event += 9; /* 9 = strlen("DG.LLLID=") */
- *receipt = atol(event);
- p = strchr(event, ' ');
- if(!p) {
- set_error(IL_LBAPI, EDG_WLL_ERROR_PARSE_BROKEN_ULM,
- "create_msg: error parsing locallogger PID");
- return(-1);
- }
- /* skip the value */
- event = p + 1;
-
- /* find DG.PRIORITY */
- p = strstr(event, "DG.PRIORITY");
- if(p) {
- int n;
-
- p += 12; /* skip the key and = */
- if((n = atoi(p)) == 0) {
- /* normal asynchronous message */
- *receipt = 0;
- }
- } else {
- /* could not find priority key */
- *receipt = 0;
- }
-
- } else {
- /* could not find local logger PID, confirmation can not be sent */
- *receipt = 0;
- }
-#endif
-
- len = encode_il_msg(buffer, ev);
- if(len < 0) {
- set_error(IL_NOMEM, ENOMEM, "create_msg: out of memory allocating message");
- return(-1);
- }
- return(len);
-}
-
-
-struct server_msg *
-server_msg_create(il_octet_string_t *event, long offset)
-{
- struct server_msg *msg;
-
- msg = malloc(sizeof(*msg));
- if(msg == NULL) {
- set_error(IL_NOMEM, ENOMEM, "server_msg_create: out of memory allocating message");
- return(NULL);
- }
-
- if(server_msg_init(msg, event) < 0) {
- server_msg_free(msg);
- return(NULL);
- }
- msg->offset = offset;
-
- return(msg);
-}
-
-
-struct server_msg *
-server_msg_copy(struct server_msg *src)
-{
- struct server_msg *msg;
-
- msg = malloc(sizeof(*msg));
- if(msg == NULL) {
- set_error(IL_NOMEM, ENOMEM, "server_msg_copy: out of memory allocating message");
- return(NULL);
- }
-
- msg->msg = malloc(src->len);
- if(msg->msg == NULL) {
- set_error(IL_NOMEM, ENOMEM, "server_msg_copy: out of memory allocating server message");
- server_msg_free(msg);
- return(NULL);
- }
- msg->len = src->len;
- memcpy(msg->msg, src->msg, src->len);
-
- msg->job_id_s = strdup(src->job_id_s);
- msg->ev_len = src->ev_len;
- msg->es = src->es;
- msg->receipt_to = src->receipt_to;
- msg->offset = src->offset;
-#if defined(IL_NOTIFICATIONS)
- msg->dest_name = strdup(src->dest_name);
- msg->dest_port = src->dest_port;
- msg->dest = strdup(src->dest);
-#endif
- return(msg);
-}
-
-
-int
-server_msg_init(struct server_msg *msg, il_octet_string_t *event)
-{
-#if defined(IL_NOTIFICATIONS)
- edg_wll_Context context;
- edg_wll_Event *notif_event;
- int ret;
-#endif
-
- assert(msg != NULL);
- assert(event != NULL);
-
- memset(msg, 0, sizeof(*msg));
-
-
-#if defined(IL_NOTIFICATIONS)
- edg_wll_InitContext(&context);
-
- /* parse the notification event */
- if((ret=edg_wll_ParseNotifEvent(context, event, ¬if_event))) {
- set_error(IL_LBAPI, ret, "server_msg_init: error parsing notification event");
- return(-1);
- }
- /* FIXME: check for allocation error */
- if(notif_event->notification.dest_host &&
- (strlen(notif_event->notification.dest_host) > 0)) {
- msg->dest_name = strdup(notif_event->notification.dest_host);
- msg->dest_port = notif_event->notification.dest_port;
- asprintf(&msg->dest, "%s:%d", msg->dest_name, msg->dest_port);
- }
- msg->job_id_s = edg_wll_NotifIdUnparse(notif_event->notification.notifId);
- if(notif_event->notification.jobstat &&
- (strlen(notif_event->notification.jobstat) > 0)) {
- msg->len = create_msg(event, &msg->msg, &msg->receipt_to);
- }
- edg_wll_FreeEvent(notif_event);
- free(notif_event);
- if(msg->len < 0) {
- return(-1);
- }
-#else
- msg->len = create_msg(event, &msg->msg, &msg->receipt_to);
- if(msg->len < 0) {
- return(-1);
- }
- msg->job_id_s = edg_wll_GetJobId(event->data);
-#endif
- /* remember to add event separator to the length */
- msg->ev_len = event->len + 1;
-
- if(msg->job_id_s == NULL) {
- set_error(IL_LBAPI, EDG_WLL_ERROR_PARSE_BROKEN_ULM, "server_msg_init: error getting id");
- return(-1);
- }
-
- return(0);
-}
-
-
-int
-server_msg_is_priority(struct server_msg *msg)
-{
- assert(msg != NULL);
-
- return(msg->receipt_to != 0);
-}
-
-
-int
-server_msg_free(struct server_msg *msg)
-{
- assert(msg != NULL);
-
- if(msg->msg) free(msg->msg);
- if(msg->job_id_s) free(msg->job_id_s);
-#if defined(IL_NOTIFICATIONS)
- if(msg->dest_name) free(msg->dest_name);
- if(msg->dest) free(msg->dest);
-#endif
- free(msg);
- return 0;
-}
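A caller of this module wraps the raw event line in an il_octet_string_t (its data and len fields are the ones server_msg_init() above reads), builds the message with server_msg_create(), can then ask server_msg_is_priority() whether the event carried a DG.LLLID receipt address, and finally releases it with server_msg_free(). A minimal sketch; the event text and the wrapper function are placeholders.

#include <string.h>

#include "interlogd.h"

/* Illustrative: wrap one event line read from the input queue. */
int wrap_event(char *line, long offset)
{
	il_octet_string_t ev;
	struct server_msg *msg;
	int prio;

	ev.data = line;
	ev.len = strlen(line);

	msg = server_msg_create(&ev, offset);
	if (msg == NULL)
		return -1;

	prio = server_msg_is_priority(msg);	/* nonzero iff a receipt was requested */
	server_msg_free(msg);
	return prio;
}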
+++ /dev/null
-#include "IlTestBase.h"
-
-#include <string.h>
-
-const char *IlTestBase::msg = "DATE=20040831150159.702224 HOST=\"some.host\" PROG=edg-wms LVL=USAGE DG.PRIORITY=0 DG.SOURCE=\"UserInterface\" DG.SRC_INSTANCE=\"\" DG.EVNT=\"RegJob\" DG.JOBID=\"https://some.host:1234/x67qr549qc\" DG.SEQCODE=\"UI=2:NS=0:WM=0:BH=1:JSS=0:LM=0:LRMS=0:APP=0\" DG.USER=\"/C=CZ/O=Cesnet/CN=Michal Vocu\" DG.REGJOB.JDL=\"\" DG.REGJOB.NS=\"ns address\" DG.REGJOB.PARENT=\"\" DG.REGJOB.JOBTYPE=\"SIMPLE\" DG.REGJOB.NSUBJOBS=\"0\" DG.REGJOB.SEED=\"\"";
-
-const char *IlTestBase::msg_enc = " 429\n6 michal\n415 DATE=20040831150159.702224 HOST=\"some.host\" PROG=edg-wms LVL=USAGE DG.PRIORITY=0 DG.SOURCE=\"UserInterface\" DG.SRC_INSTANCE=\"\" DG.EVNT=\"RegJob\" DG.JOBID=\"https://some.host:1234/x67qr549qc\" DG.SEQCODE=\"UI=2:NS=0:WM=0:BH=1:JSS=0:LM=0:LRMS=0:APP=0\" DG.USER=\"/C=CZ/O=Cesnet/CN=Michal Vocu\" DG.REGJOB.JDL=\"\" DG.REGJOB.NS=\"ns address\" DG.REGJOB.PARENT=\"\" DG.REGJOB.JOBTYPE=\"SIMPLE\" DG.REGJOB.NSUBJOBS=\"0\" DG.REGJOB.SEED=\"\"\n";
-
-const struct server_msg IlTestBase::smsg = {
- "https://some.host:1234/x67qr549qc",
- (char*)IlTestBase::msg_enc,
- strlen(IlTestBase::msg_enc),
- strlen(IlTestBase::msg) + 1,
- NULL
-};
+++ /dev/null
-extern "C" {
-#include "interlogd.h"
-}
-
-class IlTestBase {
-public:
- static const char *msg;
- static const char *msg_enc;
- static const struct server_msg smsg;
-};
+++ /dev/null
-#include <cppunit/extensions/HelperMacros.h>
-
-#include "IlTestBase.h"
-
-extern "C" {
-struct event_queue_msg {
- struct server_msg *msg;
- struct event_queue_msg *prev;
-};
-}
-
-#include <string>
-using namespace std;
-
-class event_queueTest: public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE( event_queueTest );
- CPPUNIT_TEST( testEventQueueCreate );
- CPPUNIT_TEST( testEventQueueInsert );
- CPPUNIT_TEST( testEventQueueGet );
- CPPUNIT_TEST( testEventQueueRemove );
- CPPUNIT_TEST_SUITE_END();
-
-public:
- void setUp() {
- server = strdup("localhost:8080");
- eq = event_queue_create(server);
- free(server);
- }
-
- void tearDown() {
- struct event_queue_msg *mp;
- struct server_msg *m;
-
- for(mp = eq->head; mp != NULL; ) {
- struct event_queue_msg *mq;
-
- server_msg_free(mp->msg);
- mq = mp;
- mp = mp->prev;
- free(mq);
- }
- eq->head = NULL;
- event_queue_free(eq);
- }
-
- void testEventQueueCreate() {
- CPPUNIT_ASSERT( eq != NULL );
- CPPUNIT_ASSERT_EQUAL( string(eq->dest_name), string("localhost") );
- CPPUNIT_ASSERT_EQUAL( eq->dest_port, 8081 );
- CPPUNIT_ASSERT( eq->tail == NULL );
- CPPUNIT_ASSERT( eq->head == NULL );
- CPPUNIT_ASSERT( eq->tail_ems == NULL );
- CPPUNIT_ASSERT( eq->mark_this == NULL );
- CPPUNIT_ASSERT( eq->mark_prev == NULL );
- CPPUNIT_ASSERT( eq->thread_id == 0 );
- CPPUNIT_ASSERT( eq->flushing == 0 );
- CPPUNIT_ASSERT( eq->flush_result == 0 );
- }
-
- void testEventQueueInsert() {
- struct event_queue_msg *mp;
- struct server_msg *m;
-
- doSomeInserts();
- mp = eq->head;
- m = mp->msg;
- CPPUNIT_ASSERT_EQUAL( string(m->job_id_s), string("2") );
- CPPUNIT_ASSERT_EQUAL( mp, eq->tail_ems );
- mp = mp->prev;
- m = mp->msg;
- CPPUNIT_ASSERT_EQUAL( string(m->job_id_s), string("1") );
- mp = mp->prev;
- m = mp->msg;
- CPPUNIT_ASSERT_EQUAL( string(m->job_id_s), string("3") );
- CPPUNIT_ASSERT_EQUAL( mp, eq->tail );
- CPPUNIT_ASSERT( mp->prev == NULL );
- }
-
- void testEventQueueGet() {
- struct event_queue_msg *mp;
- struct server_msg *m,sm;
- int ret;
-
- doSomeInserts();
- mp = eq->head;
- eq->head = mp->prev;
- eq->tail_ems = NULL;
- server_msg_free(mp->msg);
- free(mp);
- ret = event_queue_get(eq, &m);
- CPPUNIT_ASSERT( ret == 0 );
- CPPUNIT_ASSERT( eq->mark_this == eq->head );
- CPPUNIT_ASSERT( eq->mark_prev == NULL );
- CPPUNIT_ASSERT_EQUAL( string("1"), string(m->job_id_s) );
- sm = IlTestBase::smsg;
- sm.job_id_s = "4";
- sm.receipt_to = 1;
- ret = event_queue_insert(eq, &sm);
- CPPUNIT_ASSERT( ret == 0 );
- CPPUNIT_ASSERT( eq->mark_prev == eq->head );
- CPPUNIT_ASSERT( eq->mark_this == eq->head->prev );
- ret = event_queue_insert(eq, &sm);
- CPPUNIT_ASSERT( ret == 0 );
- CPPUNIT_ASSERT( eq->mark_prev == eq->head->prev );
- CPPUNIT_ASSERT( eq->mark_this == eq->head->prev->prev );
- }
-
- void testEventQueueRemove() {
- struct event_queue_msg *mp;
- struct server_msg *m,sm;
- int ret;
-
- doSomeInserts();
- ret = event_queue_get(eq, &m);
- mp = eq->mark_this->prev;
- sm = IlTestBase::smsg;
- sm.job_id_s = "4";
- sm.receipt_to = 1;
- event_queue_insert(eq, &sm);
- ret = event_queue_remove(eq);
- CPPUNIT_ASSERT( eq->head->prev == mp );
- CPPUNIT_ASSERT( eq->mark_this == NULL );
- CPPUNIT_ASSERT( eq->mark_prev == NULL );
- }
-
-protected:
- char *server;
- struct event_queue *eq;
-
- void doSomeInserts() {
- struct server_msg m = IlTestBase::smsg;
-
- m.job_id_s = "1";
- event_queue_insert(eq, &m);
- m.receipt_to = 1;
- m.job_id_s = "2";
- event_queue_insert(eq, &m);
- m.job_id_s = "3";
- m.receipt_to = 0;
- event_queue_insert(eq, &m);
- }
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION( event_queueTest );
+++ /dev/null
-#include <cppunit/extensions/HelperMacros.h>
-
-#include "IlTestBase.h"
-
-class event_storeTest: public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE( event_storeTest );
- CPPUNIT_TEST( event_store_recoverTest );
- CPPUNIT_TEST( event_store_syncTest );
- CPPUNIT_TEST( event_store_nextTest );
- CPPUNIT_TEST( event_store_commitTest );
- CPPUNIT_TEST( event_store_cleanTest );
- CPPUNIT_TEST( event_store_findTest );
- CPPUNIT_TEST( event_store_releaseTest );
- CPPUNIT_TEST( event_store_initTest );
- CPPUNIT_TEST( event_store_recover_allTest );
- CPPUNIT_TEST( event_store_cleanupTest );
- CPPUNIT_TEST_SUITE_END();
-
-public:
- void setUp() {
- }
-
- void tearDown() {
- }
-
- void event_store_recoverTest() {
- }
-
- void event_store_syncTest() {
- }
-
- void event_store_nextTest() {
- }
-
- void event_store_commitTest() {
- }
-
- void event_store_cleanTest() {
- }
-
- void event_store_findTest() {
- }
-
- void event_store_releaseTest() {
- }
-
- void event_store_initTest() {
- }
-
- void event_store_recover_allTest() {
- }
-
- void event_store_cleanupTest() {
- }
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION( event_storeTest );
+++ /dev/null
-#include <cppunit/extensions/TestFactoryRegistry.h>
-#include <cppunit/ui/text/TestRunner.h>
-
-extern "C" {
-#include <pthread.h>
-#include "glite/wmsutils/tls/ssl_helpers/ssl_inits.h"
-#include "glite/wmsutils/tls/ssl_helpers/ssl_pthreads.h"
-#include "glite/security/glite_gss.h"
-#include "interlogd.h"
-#include "glite/lb/consumer.h"
-}
-
-#if defined(IL_NOTIFICATIONS)
-#define DEFAULT_PREFIX "/tmp/notif_events"
-#define DEFAULT_SOCKET "/tmp/notif_interlogger.sock"
-#else
-#define DEFAULT_PREFIX "/tmp/dglogd.log"
-#define DEFAULT_SOCKET "/tmp/interlogger.sock"
-#endif
-
-int TIMEOUT = DEFAULT_TIMEOUT;
-
-gss_cred_id_t cred_handle = GSS_C_NO_CREDENTIAL;
-pthread_mutex_t cred_handle_lock = PTHREAD_MUTEX_INITIALIZER;
-
-char *file_prefix = DEFAULT_PREFIX;
-int bs_only = 0;
-
-char *cert_file = NULL;
-char *key_file = NULL;
-char *CAcert_dir = NULL;
-char *log_server = NULL;
-char *socket_path = DEFAULT_SOCKET;
-
-
-int
-main (int ac,const char *av[])
-{
- CppUnit::Test *suite = CppUnit::TestFactoryRegistry::getRegistry().makeTest();
- CppUnit::TextUi::TestRunner runner;
-
- runner.addTest(suite);
- return runner.run() ? 0 : 1;
-}
+++ /dev/null
-#include <cppunit/extensions/HelperMacros.h>
-
-#include "IlTestBase.h"
-
-extern "C" {
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <unistd.h>
-
-#include "interlogd.h"
-
- extern char *socket_path;
-}
-
-#include <string>
-using namespace std;
-
-class input_queue_socketTest: public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE( input_queue_socketTest );
- CPPUNIT_TEST( input_queue_getTest );
- CPPUNIT_TEST_SUITE_END();
-
-public:
-
- void setUp() {
- struct sockaddr_un saddr;
- int sock;
- long offset = 0;
-
- int ret = input_queue_attach();
- CPPUNIT_ASSERT(ret == 0);
-
- sock=socket(PF_UNIX, SOCK_STREAM, 0);
- CPPUNIT_ASSERT(sock >= 0);
-
- memset(&saddr, 0, sizeof(saddr));
- saddr.sun_family = AF_UNIX;
- strcpy(saddr.sun_path, socket_path);
- ret = connect(sock, (struct sockaddr *)&saddr, sizeof(saddr.sun_path));
- CPPUNIT_ASSERT(ret >= 0);
-
- ret = write(sock, &offset, sizeof(offset));
- CPPUNIT_ASSERT( ret == sizeof(offset) );
- ret = write(sock, IlTestBase::msg, strlen(IlTestBase::msg));
- CPPUNIT_ASSERT( ret == strlen(IlTestBase::msg) );
- ret = write(sock, "\n", 1);
- CPPUNIT_ASSERT( ret == 1 );
- }
-
- void tearDown() {
- input_queue_detach();
- }
-
-
- void input_queue_getTest() {
- char *event;
- long offset;
- int ret;
-
- ret = input_queue_get(&event, &offset, 10);
- CPPUNIT_ASSERT( ret >= 0 );
- CPPUNIT_ASSERT_EQUAL( 0L, offset );
- CPPUNIT_ASSERT_EQUAL( string(IlTestBase::msg), string(event) );
- free(event);
- }
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(input_queue_socketTest);
+++ /dev/null
-#include <iostream>
-
-#include <cppunit/extensions/HelperMacros.h>
-#include <cppunit/CompilerOutputter.h>
-#include <cppunit/extensions/TestFactoryRegistry.h>
-#include <cppunit/ui/text/TestRunner.h>
-
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <sys/time.h>
-#include <unistd.h>
-#include <errno.h>
-#include <assert.h>
-#include <string.h>
-
-extern "C" {
-#define DEFAULT_SOCKET "/tmp/interlogger.sock"
-char *socket_path = DEFAULT_SOCKET;
-int edg_wll_log_proto_server(int *,char *,char *,int,int);
-void edg_wll_ll_log_init(int);
-}
-
-class LLTest: public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE(LLTest);
- CPPUNIT_TEST(testProtoServer);
- CPPUNIT_TEST_SUITE_END();
-
-public:
-
- void setUp() {
- char *msg = "DATE=20040831150159.702224 HOST=\"some.host\" PROG=edg-wms LVL=USAGE DG.PRIORITY=0 DG.SOURCE=\"UserInterface\" DG.SRC_INSTANCE=\"\" DG.EVNT=\"RegJob\" DG.JOBID=\"https://some.host:1234/x67qr549qc\" DG.SEQCODE=\"UI=2:NS=0:WM=0:BH=1:JSS=0:LM=0:LRMS=0:APP=0\" DG.REGJOB.JDL=\"\" DG.REGJOB.NS=\"ns address\" DG.REGJOB.PARENT=\"\" DG.REGJOB.JOBTYPE=\"SIMPLE\" DG.REGJOB.NSUBJOBS=\"0\" DG.REGJOB.SEED=\"\"";
- pipe(pd);
- log_proto_client(pd[1], msg);
- input_queue_attach();
- }
-
- void tearDown() {
- close(pd[0]);
- close(pd[1]);
- input_queue_detach();
- }
-
- void testProtoServer() {
- int ret;
- edg_wll_ll_log_init(255);
- ret = edg_wll_log_proto_server(&pd[0],
- "michal",
- "/tmp/dglogd.log",
- 0,
- 0);
- CPPUNIT_ASSERT( ret == 0 );
- }
-
-private:
- int pd[2];
-
- int log_proto_client(int con, char *logline) {
- char header[32];
- int err;
- int size;
- u_int8_t size_end[4];
-
- err = 0;
- size = strlen(logline)+1;
- size_end[0] = size & 0xff; size >>= 8;
- size_end[1] = size & 0xff; size >>= 8;
- size_end[2] = size & 0xff; size >>= 8;
- size_end[3] = size;
- size = strlen(logline)+1;
-
- err = write(con, "DGLOG", 5);
- CPPUNIT_ASSERT(err == 5);
- err = write(con, size_end, 4);
- CPPUNIT_ASSERT(err == 4);
- err = write(con, logline, size);
- CPPUNIT_ASSERT( err == size );
-}
-
- int sock;
- int accepted;
-
-int
-input_queue_attach()
-{
- struct sockaddr_un saddr;
-
- CPPUNIT_ASSERT((sock=socket(PF_UNIX, SOCK_STREAM, 0)) >= 0);
-
- memset(&saddr, 0, sizeof(saddr));
- saddr.sun_family = AF_UNIX;
- strcpy(saddr.sun_path, socket_path);
-
- CPPUNIT_ASSERT(bind(sock, (struct sockaddr *)&saddr, sizeof(saddr)) >= 0);
- CPPUNIT_ASSERT(listen(sock, 5) >= 0 );
- return(0);
-}
-
-void input_queue_detach()
-{
- if (sock >= 0)
- close(sock);
- unlink(socket_path);
-}
-
-};
-
-
-CPPUNIT_TEST_SUITE_REGISTRATION( LLTest );
-
-int
-main (int ac,const char *av[])
-{
- CppUnit::Test *suite = CppUnit::TestFactoryRegistry::getRegistry().makeTest();
- CppUnit::TextUi::TestRunner runner;
-
- runner.addTest(suite);
- return runner.run() ? 0 : 1;
-}
+++ /dev/null
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <unistd.h>
-#include <netinet/in.h>
-#include <signal.h>
-#include <string.h>
-#include <syslog.h>
-#include <fcntl.h>
-
-#define edg_wll_gss_read_full(a,b,c,d,e,f) test_edg_wll_gss_read_full(a,b,c,d,e,f)
-#define edg_wll_gss_write_full(a,b,c,d,e,f) test_edg_wll_gss_write_full(a,b,c,d,e,f)
-#define edg_wll_GssConnection int
-
-#include "logd_proto.h"
-#include "glite/lb/context-int.h"
-#include "glite/lb/escape.h"
-#include "glite/lb/events_parse.h"
-
-int
-test_edg_wll_gss_read_full(int *fd,
- void *buf,
- size_t bufsize,
- struct timeval *timeout,
- size_t *total,
- edg_wll_GssStatus *code)
-{
-	ssize_t n = read(*fd, buf, bufsize);
-
-	/* read() may return -1; the unsigned size_t *total must not be used for the sign test */
-	*total = n < 0 ? 0 : n;
-	return(n < 0 ? -1 : 0);
-}
-
-int
-test_edg_wll_gss_write_full(int *fd,
- const void *buf,
- size_t bufsize,
- struct timeval *timeout,
- size_t *total,
- edg_wll_GssStatus *code)
-{
- return(0);
-}
-
-#include "logd_proto.c"
+++ /dev/null
-#include <cppunit/extensions/HelperMacros.h>
-
-#include "IlTestBase.h"
-
-#include <string.h>
-
-using namespace std;
-
-class server_msgTest: public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE(server_msgTest);
- CPPUNIT_TEST( server_msg_createTest );
- CPPUNIT_TEST( server_msg_copyTest );
- CPPUNIT_TEST_SUITE_END();
-
-public:
-
- void setUp() {
- msg = server_msg_create((char *)IlTestBase::msg);
- }
-
- void tearDown() {
- server_msg_free(msg);
- }
-
- void server_msg_createTest() {
- CPPUNIT_ASSERT( msg != NULL );
- CPPUNIT_ASSERT_EQUAL( string(msg->job_id_s), string(IlTestBase::smsg.job_id_s) );
- CPPUNIT_ASSERT_EQUAL( string(msg->msg), string(IlTestBase::smsg.msg) );
- CPPUNIT_ASSERT_EQUAL( msg->len, IlTestBase::smsg.len );
- CPPUNIT_ASSERT_EQUAL( msg->ev_len, IlTestBase::smsg.ev_len );
- CPPUNIT_ASSERT_EQUAL( msg->es, IlTestBase::smsg.es );
- CPPUNIT_ASSERT( !server_msg_is_priority(msg) );
- }
-
- void server_msg_copyTest() {
- struct server_msg *msg2;
-
- msg2 = server_msg_copy(msg);
- CPPUNIT_ASSERT( msg2 != NULL );
- CPPUNIT_ASSERT( msg2 != msg );
- CPPUNIT_ASSERT_EQUAL( string(msg->job_id_s), string(msg2->job_id_s) );
- CPPUNIT_ASSERT( msg->job_id_s != msg2->job_id_s);
- CPPUNIT_ASSERT_EQUAL( string(msg->msg), string(msg2->msg) );
- CPPUNIT_ASSERT( msg->msg != msg2->msg );
- CPPUNIT_ASSERT_EQUAL( msg->len, msg2->len );
- CPPUNIT_ASSERT_EQUAL( msg->ev_len, msg2->ev_len );
- CPPUNIT_ASSERT_EQUAL( msg->es, msg2->es );
- server_msg_free(msg2);
- }
-
-private:
- struct server_msg *msg;
-};
-
-
-CPPUNIT_TEST_SUITE_REGISTRATION(server_msgTest);
+++ /dev/null
-LICENSE file for EGEE Middleware
-================================
-
-Copyright (c) 2004 on behalf of the EU EGEE Project:
-The European Organization for Nuclear Research (CERN),
-Istituto Nazionale di Fisica Nucleare (INFN), Italy
-Datamat Spa, Italy
-Centre National de la Recherche Scientifique (CNRS), France
-CS Systeme d'Information (CSSI), France
-Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
-Universiteit van Amsterdam (UvA), Netherlands
-University of Helsinki (UH.HIP), Finland
-University of Bergen (UiB), Norway
-Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-
-3. The end-user documentation included with the redistribution, if
-any, must include the following acknowledgment: "This product includes
-software developed by The EU EGEE Project (http://cern.ch/eu-egee/)."
-Alternatively, this acknowledgment may appear in the software itself, if
-and wherever such third-party acknowledgments normally appear.
-
-4. The names EGEE and the EU EGEE Project must not be
-used to endorse or promote products derived from this software without
-prior written permission. For written permission, please contact
-<email address>.
-
-5. You are under no obligation whatsoever to provide anyone with any
-bug fixes, patches, or upgrades to the features, functionality or
-performance of the Software ("Enhancements") that you may develop over
-time; however, if you choose to provide your Enhancements to The EU
-EGEE Project, or if you choose to otherwise publish or distribute your
-Enhancements, in source code form without contemporaneously requiring
-end users of The EU EGEE Project to enter into a separate written license
-agreement for such Enhancements, then you hereby grant The EU EGEE Project
-a non-exclusive, royalty-free perpetual license to install, use, copy,
-modify, prepare derivative works, incorporate into the EGEE Middleware
-or any other computer software, distribute, and sublicense your
-Enhancements or derivative works thereof, in binary and source code
-form (if any), whether developed by The EU EGEE Project or third parties.
-
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL PROJECT OR ITS CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-This software consists of voluntary contributions made by many
-individuals on behalf of the EU EGEE Project. For more information on The
-EU EGEE Project, please see http://cern.ch/eu-egee/. For more information on
-EGEE Middleware, please see http://egee-jra1.web.cern.ch/egee-jra1/
-
-
+++ /dev/null
-# defaults
-top_srcdir=.
-builddir=build
-top_builddir=${top_srcdir}/${builddir}
-stagedir=.
-distdir=.
-globalprefix=glite
-lbprefix=lb
-package=glite-lb-server
-version=0.2.0
-PREFIX=/opt/glite
-
-glite_location=/opt/glite
-globus_prefix=/opt/globus
-nothrflavour=gcc32
-thrflavour=gcc32pthr
-expat_prefix=/opt/expat
-gsoap_prefix=/opt/gsoap
-
--include Makefile.inc
--include ../project/version.properties
-
-version=${module.version}
-
-CC=gcc
-YACC=bison -y
-
-VPATH=${top_srcdir}/src:${top_srcdir}/test:${top_srcdir}/examples:${top_srcdir}/project
-AT3=perl -I${top_srcdir}/project ${top_srcdir}/project/at3
-
-TEST_LIBS:=-L${cppunit_prefix}/lib -lcppunit
-TEST_INC:=-I${cppunit_prefix}/include
-
-ifdef LB_PERF
- STATIC_LIB_BK:=${stagedir}/lib/libglite_lb_bkserver_perf.a
- LB_PERF_FLAGS:=-DLB_PERF
-else
- STATIC_LIB_BK:=${stagedir}/lib/libglite_lb_bkserver.a
-endif
-
-SUFFIXES = .T
-
-DEBUG:=-g -O0 -Wall
-
-ifdef LB_STANDALONE
- LB_STANDALONE_FLAGS:=-DLB_STANDALONE
-endif
-
-CFLAGS:= ${DEBUG} \
- -DVERSION=\"${version}\" \
- -I${stagedir}/include -I${top_srcdir}/src -I. \
- -I${expat_prefix}/include \
- -I${gsoap_prefix}/include \
- ${COVERAGE_FLAGS} \
- -I${mysql_prefix}/include -I${mysql_prefix}/include/mysql \
- -I${globus_prefix}/include/${nothrflavour} \
- -I${gridsite_prefix}/include -I${globus_prefix}/include/${nothrflavour}/openssl \
- -D_GNU_SOURCE ${LB_STANDALONE_FLAGS} ${LB_PERF_FLAGS}
-
-
-LINK:=libtool --mode=link ${CC} ${LDFLAGS}
-LINKXX:=libtool --mode=link ${CXX} -rpath ${stagedir}/lib ${LDFLAGS}
-INSTALL:=libtool --mode=install install
-
-GLOBUS_LIBS:= -L${globus_prefix}/lib \
- -lglobus_common_${nothrflavour} \
- -lglobus_gssapi_gsi_${nothrflavour} \
-
-ifneq (${mysql_prefix},/usr)
- ifeq ($(shell echo ${mysql_version} | cut -d. -f1,2),4.1)
- mysqlib := -L${mysql_prefix}/lib/mysql
- else
- mysqlib := -L${mysql_prefix}/lib
- endif
-endif
-
-ifneq (${expat_prefix},/usr)
- expatlib := -L${expat_prefix}/lib
-endif
-
-
-SRVBONES_LIB:= -L${stagedir}/lib -lglite_lb_server_bones
-GRIDSITE_LIBS = -lgridsite_globus `xml2-config --libs`
-
-vomsflavour := _${nothrflavour}
-ifeq (${nothrflavour},gcc32)
- vomsflavour :=
-endif
-ifeq (${nothrflavour},gcc32dbg)
- vomsflavour :=
-endif
-
-VOMS_LIBS:=-L${voms_prefix}/lib -lvomsc${vomsflavour}
-EXT_LIBS:= \
- ${mysqlib} -lmysqlclient -lz\
- ${expatlib} -lexpat \
- ${GRIDSITE_LIBS} \
- ${VOMS_LIBS} \
- ${GLOBUS_LIBS}
-
-
-
-LB_PROXY_LIBS:= \
- ${STATIC_LIB_BK} \
- ${SRVBONES_LIB} \
- -lglite_lb_common_${nothrflavour} \
- -lglite_security_gss_${nothrflavour} \
- ${EXT_LIBS}
-
-
-
-glite_lb_proxy: lbproxy.o fake_write2rgma.o ${STATIC_LIB_BK}
- @echo DEBUG: mysql_version=${mysql_version} mysql_prefix=${mysql_prefix}
- @echo DEBUG: shell: x$(shell echo ${mysql_version} | cut -d. -f1,2)x
- ${LINK} -o $@ lbproxy.o fake_write2rgma.o ${LB_PROXY_LIBS}
-
-glite_lb_proxy_perf: lbproxy.o fake_write2rgma.o ${STATIC_LIB_BK}
- ${LINK} -o $@ lbproxy.o fake_write2rgma.o ${LB_PROXY_LIBS}
-
-default all: compile
-
-compile: glite_lb_proxy
-
-check: compile
- -echo No test so far
-
-examples:
-
-doc:
-
-stage: compile
- $(MAKE) install PREFIX=${stagedir} DOSTAGE=yes
-
-dist: distsrc distbin
-
-distsrc:
- mkdir -p ${top_srcdir}/${package}-${version}
- cd ${top_srcdir} && GLOBIGNORE="${package}-${version}" && cp -Rf * ${package}-${version}
- cd ${top_srcdir} && tar -czf ${distdir}/${package}-${version}_src.tar.gz --exclude-from=project/tar_exclude ${package}-${version}
- rm -rf ${top_srcdir}/${package}-${version}
-
-distbin:
- $(MAKE) install PREFIX=`pwd`/tmpbuilddir${stagedir}
- save_dir=`pwd`; cd tmpbuilddir${stagedir} && tar -czf $$save_dir/${top_srcdir}/${distdir}/${package}-${version}_bin.tar.gz *; cd $$save_dir
- rm -rf tmpbuilddir
-
-install:
- -mkdir -p ${PREFIX}/bin ${PREFIX}/etc ${PREFIX}/etc/init.d
- -mkdir -p ${PREFIX}/share/doc/${package}-${version}
- ${INSTALL} -m 644 ${top_srcdir}/LICENSE ${PREFIX}/share/doc/${package}-${version}
- ${INSTALL} -m 644 ${top_srcdir}/doc/README ${top_srcdir}/doc/README.deploy ${PREFIX}/share/doc/${package}-${version}
- ${INSTALL} -m 755 glite_lb_proxy ${PREFIX}/bin/glite-lb-proxy
- ${INSTALL} -m 644 ${top_srcdir}/config/glite-lb-dbsetup-proxy.sql ${PREFIX}/etc
- ${INSTALL} -m 755 ${top_srcdir}/config/startup ${PREFIX}/etc/init.d/glite-lb-proxy
-
-# don't do it due to RPM conflict
-# if [ x${DOSTAGE} != xyes ]; then \
-# ${INSTALL} -m 755 ${stagedir}/bin/glite-lb-interlogd ${PREFIX}/bin; \
-# fi
-
-
-clean:
-
-%.c: %.c.T
- rm -f $@
- ${AT3} $< >$@ || rm -f $@
- chmod -w $@ >/dev/null
-
-%.o: %.y
- ${YACC} -d ${YFLAGS} $<
- mv y.tab.c $*.c
- mv y.tab.h $*.h
- ${CC} -c ${CFLAGS} $*.c
- rm $*.c
-
-%.cpp: %.cpp.T
- rm -f $@
- ${AT3} $< >$@ || rm -f $@
- chmod -w $@ >/dev/null
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Build file for the GLite LB Proxy module
-
- Authors: Jiri Skrabal <nykolas@ics.muni.cz>
- Release: $Name$
-
- Revision history:
-
--->
-
-<project name="proxy" default="dist">
-
- <!-- =========================================
- Builds the GLite LB Proxy Module
- ========================================= -->
-
- <!-- =========================================
- Import properties (order is important)
- ========================================= -->
-
- <!-- import baseline & user properties -->
- <import file="../org.glite/project/baseline.properties.xml" />
-
- <!-- import component build properties,
- component properties &
- component common properties -->
- <import file="./project/properties.xml"/>
-
- <!-- import subsystem build properties,
- subsystem properties &
- subsystem common properties -->
- <import file="${subsystem.properties.file}"/>
-
- <!-- import global build properties &
- global properties -->
- <import file="${global.properties.file}" />
-
- <!-- =========================================
- Load dependency property files (order is important)
- ========================================= -->
- <property file="${user.dependencies.file}"/>
- <property file="${component.dependencies.file}" />
- <property file="${subsystem.dependencies.file}" />
- <property file="${global.dependencies.file}"/>
-
- <!-- =========================================
- Load configure options (order is important)
- ========================================= -->
- <import file="${global.configure.options.file}"/>
- <import file="${component.configure.options.file}"/>
-
- <!-- =========================================
- Import task definitions (order is important)
- ========================================= -->
- <import file="${subsystem.taskdefs.file}" />
- <import file="${global.taskdefs.file}" />
-
- <!-- =========================================
- Load common targets
- ========================================= -->
- <import file="${global.targets-simple_make.file}" />
-
- <!-- =========================================
- Load version file
- ========================================= -->
- <property file="${module.version.file}"/>
- <property file="${module.build.file}"/>
-
- <!-- ==============================================
- Local private targets
- ============================================== -->
-
- <target name="localinit"
- description="Module specific initialization tasks">
- <!-- Copy support files from the subsystem project to the component project-->
- <copy toDir="${module.project.dir}">
- <fileset dir="${subsystem.project.dir}">
- <include name="at3" />
- <include name="*.T" />
- <include name="*.pm" />
- </fileset>
- </copy>
- <antcall target="lbmakefiles" />
- </target>
-
- <target name="localcompile"
- description="Module specific compile tasks">
- </target>
-
- <target name="localclean"
- description="Module specific cleaning tasks">
- <delete>
- <fileset dir="${module.project.dir}">
- <include name="at3" />
- <include name="*.T" />
- <include name="*.pm" />
- </fileset>
- </delete>
- </target>
-
- <!-- =========================================
- RPM settings
- ========================================= -->
-
-	<property name="build.package.summary" value="L&amp;B Proxy server" />
-	<property name="build.package.description" value=" The daemon
-installed at the ??? machine. It is responsible for accepting events from
-???, storing them in RDBMS, forwarding them to the real L&amp;B server
-and performing queries on client requests (job status, job log etc.).
-Also includes purge utilities to remove (and optionally archive) inactive
-data from the database and to change database index configuration." />
-
-</project>
+++ /dev/null
-create table jobs (
- jobid char(32) binary not null,
- dg_jobid varchar(255) binary not null,
- userid char(32) binary not null,
- aclid char(32) binary null,
-
- primary key (jobid),
- unique (dg_jobid),
- index (userid)
-);
-
-create table users (
- userid char(32) binary not null,
- cert_subj varchar(255) binary not null,
-
- primary key (userid),
- unique (cert_subj)
-);
-
-create table events (
- jobid char(32) binary not null,
- event int not null,
- code int not null,
- prog varchar(255) binary not null,
- host varchar(255) binary not null,
- time_stamp datetime not null,
- userid char(32) binary null,
- usec int null,
- level int null,
-
- arrived datetime not null,
-
-
- primary key (jobid,event),
- index (time_stamp),
- index (host),
- index (arrived)
-);
-
-create table short_fields (
- jobid char(32) binary not null,
- event int not null,
- name varchar(200) binary not null,
- value varchar(255) binary null,
-
- primary key (jobid,event,name)
-);
-
-create table long_fields (
- jobid char(32) binary not null,
- event int not null,
- name varchar(200) binary not null,
- value mediumblob null,
-
- primary key (jobid,event,name)
-);
-
-create table states (
- jobid char(32) binary not null,
- status int not null,
- seq int not null,
- int_status mediumblob not null,
- version varchar(32) not null,
- parent_job varchar(32) binary not null,
-
- primary key (jobid),
- index (parent_job)
-
-);
-
-create table status_tags (
- jobid char(32) binary not null,
- seq int not null,
- name varchar(200) binary not null,
- value varchar(255) binary null,
-
- primary key (jobid,seq,name)
-);
-
-create table server_state (
- prefix varchar(100) not null,
- name varchar(100) binary not null,
- value varchar(255) binary not null,
-
- primary key (prefix,name)
-);
-
-create table acls (
- aclid char(32) binary not null,
- value mediumblob not null,
- refcnt int not null,
-
- primary key (aclid)
-);
-
-create table notif_registrations (
- notifid char(32) binary not null,
- destination varchar(200) not null,
- valid datetime not null,
- userid char(32) binary not null,
- conditions mediumblob not null,
-
- primary key (notifid)
-);
-
-create table notif_jobs (
- notifid char(32) binary not null,
- jobid char(32) binary not null,
-
- primary key (notifid,jobid),
- index (jobid)
-);
+++ /dev/null
-#!/bin/sh
-
-GLITE_LOCATION=${GLITE_LOCATION:-/opt/glite}
-GLITE_LOCATION_VAR=${GLITE_LOCATION_VAR:-/var/glite}
-
-[ -f /etc/glite.conf ] && . /etc/glite.conf
-[ -f $GLITE_LOCATION/etc/glite-wms.conf ] && . $GLITE_LOCATION/etc/glite-wms.conf
-
-[ -f $GLITE_LOCATION/etc/lb.conf ] && . $GLITE_LOCATION/etc/lb.conf
-[ -f $GLITE_LOCATION_VAR/etc/lb.conf ] && . $GLITE_LOCATION_VAR/etc/lb.conf
-
-[ -f $HOME/.glite.conf ] && . $HOME/.glite.conf
-
-[ -n "$GLITE_LB_PROXY_PIDFILE" ] && pidfile=$GLITE_LB_PROXY_PIDFILE ||
- pidfile=$GLITE_LOCATION_VAR/glite-lb-proxy.pid
-
-start()
-{
- if test -z "$GLITE_USER" ;then
- echo 'Error: GLITE_USER is not set'
- echo FAILED
- return 1
- fi
-
- #
- # XXX: Starting proxy only with default socket paths
- #
- echo -n Starting glite-lb-proxy ...
- su - $GLITE_USER -c "$GLITE_LOCATION/bin/glite-lb-proxy \
- -i $pidfile " && echo " done" || echo " FAILED"
-}
-
-stop()
-{
- if [ -f $pidfile ]; then
- pid=`cat $pidfile`
- kill $pid
- echo -n Stopping glite-lb-proxy \($pid\) ...
- try=0
- while ps p $pid >/dev/null 2>&1; do
- sleep 1;
- try=`expr $try + 1`
- if [ $try = 20 ]; then
- echo " giving up after $try retries"
- return 1
- fi
- done
- echo " done"
- rm -f $pidfile
- else
- echo $pidfile does not exist - glite-lb-proxy not running? >&2
- return 1
- fi
-}
-
-status()
-{
- if [ -f $pidfile ]; then
- pid=`cat $pidfile`
- if ps p $pid >/dev/null 2>&1; then
- echo glite-lb-proxy running as $pid
- return 0
- fi
- fi
-
- echo glite-lb-proxy not running
- return 1
-}
-
-case x$1 in
- xstart) start;;
- xstop) stop;;
- xrestart) stop; start;;
- xstatus) status;;
- x*) echo usage: $0 start,stop,restart,status >&2
- exit 1;;
-esac
+++ /dev/null
-LB proxy usage
-==============
-
-LB proxy accepts LB events, passes them on to the full-featured LB servers, and
-serves both event and job state queries similarly to the LB server. However,
-the query results reflect ONLY the LOCAL VIEW of the proxy, i.e. the
-events that were logged through it, disregarding other events which may affect
-the job state too. On the other hand, all LB proxy operation is synchronous,
-i.e. it is guaranteed that a successfully logged event is visible in query
-results immediately, unlike in the standard LB logging chain.
-
-LB proxy is supposed to run on the RB machine, providing the WMS daemons
-with optimal LB performance, while offloading the user LB queries to
-a dedicated LB server.
-
-Events are logged to the LB proxy through a local UNIX socket. The connection to
-the LB proxy is not encrypted, and no SSL-like authentication/authorisation is
-used; it relies completely on the UNIX security mechanisms. Because of the missing
-SSL authentication, the logging user identity has to be specified explicitly
-via the LB proxy API.
-
-Due to the synchronous operation the LB proxy can help with the management of LB
-sequence codes. It records the most recent sequence code, which can be retrieved
-and used later, even by another WMS component. However, this mechanism works
-for the "one job instance at a time" model only. Once multiple instances of the
-same job may co-exist (which may be the case with shallow resubmission), these
-are distinguished exactly by the LB sequence code, hence the LB proxy cannot
-do the job. For this case the LB proxy API still provides means of specifying
-the sequence code explicitly.
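-
-For illustration, a minimal shell sketch of carrying the sequence code between
-calls, modelled on the log_ev_proxy helper in the example scripts of this
-module (the server address and the UserTag event are placeholders only):
-
-  BKSERVER=`hostname -f`:9000
-  # job_reg prints EDG_JOBID=... and EDG_WL_SEQUENCE=... on its last two lines
-  eval `glite-lb-job_reg -x -m $BKSERVER -s UserInterface 2>&1 | tail -n 2`
-  # each logevent call prints the next sequence code; feed it to the next call
-  EDG_WL_SEQUENCE=`glite-lb-logevent -x -j "$EDG_JOBID" -s UserInterface -c "$EDG_WL_SEQUENCE" -e UserTag --name color --value red`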
-
-All jobs are recorded locally in the LB proxy database until the job gets into
-the CLEARED, ABORTED, CANCELED, or DONE state. These jobs are then purged from the
-LB proxy (but they are still available on the LB server).
->>A timeout should also be set, after which the job is purged from the LB proxy.
-This has to be done by an external purge client. <<
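-
-A minimal sketch of such an external purge pass, reusing the purge_proxy helper
-invocation from the example scripts of this module (the zero timeout values are
-copied from those scripts for illustration only):
-
-  # -x selects the LB proxy; the timeout flags are taken verbatim from the
-  # purge helper in the example scripts
-  glite-lb-purge -x -a 0 -c 0 -n 0 -o 0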
-
-Using API
-=========
-LB proxy uses the same API functions for consumer and producer as the LB does,
-except that the function names are suffixed with 'Proxy', i.e. edg_wll_SetLoggingJob()
-has its own LB proxy variant edg_wll_SetLoggingJobProxy().
-
-LB proxy communication uses two separate local UNIX sockets, one for
-consumer and one for producer API calls. Their paths are set when the LB context
-is initialized, according to the UNIX environment variables EDG_WL_LBPROXY_STORE_SOCK
-(producer API) and EDG_WL_LBPROXY_SERVE_SOCK (consumer API). If these variables
-are not set, default values are used (producer: "/tmp/lb_proxy_store.sock",
-consumer: "/tmp/lb_proxy_serve.sock"). Finally, you can override them with
-edg_wll_SetParam(ctx, EDG_WLL_PARAM_LBPROXY_STORE_SOCK, "path_to_store_socket");
-and
-edg_wll_SetParam(ctx, EDG_WLL_PARAM_LBPROXY_SERVE_SOCK, "path_to_serve_socket");
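-
-For example, a minimal shell sketch pointing a proxy-aware client at
-non-default socket paths before its LB context is initialized (the paths and
-the logged event below are placeholders, not defaults):
-
-  export EDG_WL_LBPROXY_STORE_SOCK=/var/glite/lb_proxy_store.sock
-  export EDG_WL_LBPROXY_SERVE_SOCK=/var/glite/lb_proxy_serve.sock
-  glite-lb-logevent -x -j "$EDG_JOBID" -s UserInterface -c "$EDG_WL_SEQUENCE" -e UserTag --name color --value red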
-
-Another important difference is in edg_wll_SetLoggingJobProxy(), where it is not
-necessary to give the sequence code parameter. If the user does not specify it,
-the LB proxy gets the current one from its database. The 'user' attribute of this
-call should be the user certificate DN string. If not set, it is taken from the
-UNIX environment by default.
-
-For example you could use:
-char *user = "/O=CESNET/O=Masaryk University/CN=Jiri Skrabal";
-edg_wll_SetLoggingJobProxy(ctx, jobid, NULL, user, EDG_WLL_SEQ_NORMAL);
-
-The only substantially different LB Proxy call is 'RegisterJob'. The current
-implementation of RegisterJobProxy() talks both to the LB server,
-bypassing the interlogger, and to the LB proxy at once. So for job
-registration you should supply the user's credentials to the
-context in the same way as was done until now. The other proxy calls
-don't need it.
+++ /dev/null
-The LB Proxy RPM package contains the following files:
-bin/glite-lb-interlogd
-bin/glite-lb-proxy
-etc/glite-lb-dbsetup-proxy.sql
-etc/init.d/glite-lb-proxy
-share/doc/glite-lb-proxy-1.1.1/LICENSE
-
-The LB Proxy RPM package depends on the glite-lb-client-interface and
-glite-lb-common RPM packages. In addition, reasonable run-time functionality
-also depends on glite-lb-logger and a running interlogger.
-
-LB Proxy is supposed to run on the RB machine and its outgoing communication
-goes through the interlogger. The same interlogger that serves any other
-logging calls can be used; in fact, there is usually no valid reason to run
-more than one standard interlogger on one machine. For full LB Proxy
-functionality you need to have a running LB server (at any location).
-
-The LB Proxy installation should follow the steps described below:
-
-0) LB Proxy RPM package install
-
-1) Create a new database in the same way as is done for the bkserver.
-	Database name: lbproxy
-	Grant privileges to user: lbserver
-	The database has the same structure as the bkserver database (you can use
-	the SQL script etc/glite-lb-dbsetup-proxy.sql to create the proper tables).
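-
-	A minimal sketch of this step, assuming a local MySQL server with root
-	access (adjust the account handling to local policy):
-
-	  mysqladmin -u root create lbproxy
-	  mysql -u root -e "GRANT ALL ON lbproxy.* TO lbserver@localhost"
-	  mysql -u lbserver lbproxy < /opt/glite/etc/glite-lb-dbsetup-proxy.sql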
-
-2) Start servers with scripts from distribution
- /opt/glite/etc/init.d/glite-lb-locallogger start
- /opt/glite/etc/init.d/glite-lb-proxy start
-
-	The glite-lb-locallogger script comes with the glite-lb-logger RPM package
-	and runs the locallogger as well as the interlogger. It is not necessary to
-	run the locallogger if you do not use direct LB server logging calls, so you
-	can simply start everything you need from the command line.
-
-	The options to the LB proxy server (an illustrative invocation follows the list):
- -p, --sock path-name to the local socket
- This is the path prefix for both LB Proxy unix
- sockets. Default value is "/tmp/lb_proxy_".
- -m, --mysql database connect string
-	    This has the same functionality as described for the LB server.
- Default value is "lbserver/@localhost:lbproxy".
- -d, --debug don't run as daemon, additional diagnostics
- -s, --slaves number of slave servers to fork
- -l, --semaphores number of semaphores (job locks) to use
- -i, --pidfile file to store master pid
- --proxy-il-sock socket to send events to interlogger
- Default value is "/tmp/interlogger.sock".
- --proxy-il-fprefix file prefix for events
- Default value is "/tmp/notif_events".
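-
-	An illustrative invocation overriding some of these defaults (all values
-	below are placeholders, not recommendations):
-
-	  /opt/glite/bin/glite-lb-proxy -p /var/glite/lb_proxy_ \
-	        -m lbserver/@localhost:lbproxy -s 5 -i /var/glite/glite-lb-proxy.pid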
-
-4) Test the basic functionality of the environment:
- # job_reg uses direct access to the bkserver at `hostname -f`:9000
- # and to the LBProxy store socket (env. var EDG_WL_LBPROXY_STORE_SOCK)
- # at once
- ./glite-lb-job_reg -m `hostname -f`:9000 -x -s UserInterface
- # log usertag COLOR = red to the proxy
- ./glite-lb-log_usertag_proxy -s /tmp/lb_proxy_store.sock -j <jobid> -u test -n color -v red
- # and check lbserver values with job_stat
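-	# a sketch of that last check (flags as used by the example scripts in
-	# this module): query both the bkserver and the LB proxy serve socket
-	./glite-lb-job_status <jobid>
-	./glite-lb-job_status -x /tmp/lb_proxy_serve.sock <jobid>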
+++ /dev/null
-#!/bin/sh
-
-# XXX: add path to the stage area
-PATH=/home/michal/shared/egee/jra1-head/stage/bin:/home/michal/shared/egee/jra1-head/stage/examples:$PATH
-
-#set -x
-
-# Binaries
-LOGEV=${LOGEV:-glite-lb-logevent}
-JOBLOG=${JOBLOG:-glite-lb-job_log}
-JOBREG=${JOBREG:-glite-lb-job_reg}
-USERJOBS=${USERJOBS:-glite-lb-user_jobs}
-JOBSTAT=${JOBSTAT:-glite-lb-job_status}
-PURGE=${PURGE:-glite-lb-purge}
-
-# -m host
-BKSERVER_HOST=${BKSERVER_HOST:-`hostname -f`:9000}
-TEST_LBPROXY_STORE_SOCK=${EDG_WL_LBPROXY_STORE_SOCK:-/tmp/lb_proxy_store.sock}
-TEST_LBPROXY_SERVE_SOCK=${EDG_WL_LBPROXY_SERVE_SOCK:-/tmp/lb_proxy_serve.sock}
-
-STATES="aborted cancelled done ready running scheduled waiting"
-LBPROXY_PURGE_STATES="cleared done aborted cancelled"
-JOBS_ARRAY_SIZE=10
-SAMPLE_JOBS_ARRAY[0]=
-SAMPLE_JOBS_STATES[0]=
-SAMPLE_JOBS_RESPONSES[0]=
-
-# some defaults
-DEBUG=2
-LOGFD=${LOGFD:-1}
-LARGE_STRESS=${LARGE_STRESS:-}
-
-# timeouts for polling the bkserver
-timeout=10
-maxtimeout=300
-
-#
-# Procedures
-#
-
-# print help message
-show_help()
-{
- echo "Usage: $0 [OPTIONS] "
- echo "Options:"
- echo " -h | --help Show this help message."
- echo " -x | --proxy-sockpath-pref LBProxy socket path prefix."
- echo " -j | --jobs-count Count of test(ed) jobs."
-	echo " -s | --states              List of states into which tested jobs may fall."
-	echo " -p | --proxy-purge-states  List of states in which LBProxy purges the job."
-	echo " -l | --large-stress 'size' Do a large stress logging ('size' random data added to the messages)."
- echo " -g | --log 'logfile' Redirect all output to the 'logfile'."
- echo ""
- echo "For proper operation check your grid-proxy-info"
- grid-proxy-info
-}
-
-check_exec()
-{
- [ $DEBUG -gt 0 ] && [ -n "$2" ] && echo -n -e "$2\t" || echo -n -e "$1\t"
- eval $1
- RV=$?
- [ $DEBUG -gt 0 ] && [ $RV -eq 0 ] && echo "OK" || echo "FAILED"
- return $RV
-}
-
-# check for existence of needed executable(s)
-check_utils()
-{
-	check_exec 'JOBREG=`which $JOBREG`' "Checking $JOBREG utility" || exit 1
-	check_exec 'JOBLOG=`which $JOBLOG`' "Checking $JOBLOG utility" || exit 1
-	check_exec 'LOGEV=`which $LOGEV`' "Checking $LOGEV utility" || exit 1
-	check_exec 'USERJOBS=`which $USERJOBS`' "Checking $USERJOBS utility" || exit 1
-	check_exec 'JOBSTAT=`which $JOBSTAT`' "Checking $JOBSTAT utility" || exit 1
-}
-
-log_ev()
-{
-# $LOGEV -j $EDG_JOBID -s NetworkServer -e UserTag --name color --value red
- [ $DEBUG -gt 2 ] && echo "$LOGEV -j \"$EDG_JOBID\" -s UserInterface -c \"$EDG_WL_SEQUENCE\" $@"
- EDG_WL_SEQUENCE=`$LOGEV $LARGE_STRESS -j $EDG_JOBID -s UserInterface -c $EDG_WL_SEQUENCE "$@"`
- test $? -ne 0 -o -z "$EDG_WL_SEQUENCE" && echo "missing EDG_WL_SEQUENCE from $LOGEV"
-}
-
-log_ev_proxy()
-{
-# $LOGEV -x -j $EDG_JOBID -s NetworkServer -e UserTag --name color --value red
-
- [ $DEBUG -gt 2 ] && echo "$LOGEV -x -j \"$EDG_JOBID\" -s UserInterface -c \"$EDG_WL_SEQUENCE\" $@"
- EDG_WL_SEQUENCE=`$LOGEV -x $LARGE_STRESS -j $EDG_JOBID -s UserInterface -c $EDG_WL_SEQUENCE "$@"`
- test $? -ne 0 -o -z "$EDG_WL_SEQUENCE" && echo "missing EDG_WL_SEQUENCE from $LOGEV"
-}
-
-purge()
-{
- [ $DEBUG -gt 2 ] && echo "$PURGE -a 0 -c 0 -n 0 -o 0 $@"
- $PURGE -a 0 -c 0 -n 0 -o 0 "$@"
-}
-
-purge_proxy()
-{
- [ $DEBUG -gt 2 ] && echo "$PURGE -x -a 0 -c 0 -n 0 -o 0 $@"
- $PURGE -x -a 0 -c 0 -n 0 -o 0 "$@"
-}
-
-
-db_clear_jobs()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Purging test jobs from db\t\t"
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- LARGE_STRESS=""
- EDG_WL_SEQUENCE="UI=999999:NS=9999999999:WM=999999:BH=9999999999:JSS=999999:LM=999999:LRMS=999999:APP=999999"
-# log_ev_proxy -e Clear --reason=PurgingDB
-# purge_proxy
-# log_ev -e Clear --reason=PurgingDB
-# purge
-
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
-}
-
-# Test that registers jobs
-# and checks against lbproxy and bkserver
-#
-test_gen_sample_jobs()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Registering sample jobs\t\t\t"
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
-# eval `$JOBREG -x -m $BKSERVER_HOST -s UserInterface 2>&1 | tail -n 2`
- TMP=`$JOBREG -x -m $BKSERVER_HOST -s UserInterface 2>&1`
- [ $? -ne 0 ] && echo -e "ERROR\n\t$JOBREG error!"
- eval `echo "$TMP" | tail -n 2`
- test -z "$EDG_JOBID" && echo "test_gen_sample_jobs: $JOBREG failed" && exit 2
- SAMPLE_JOBS_ARRAY[$job]=$EDG_JOBID
-
- state=`$JOBSTAT $EDG_JOBID 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
- proxy_state=`$JOBSTAT -x $TEST_LBPROXY_SERVE_SOCK $EDG_JOBID 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
- if test "$state" != "submitted" ; then
-			echo -e "ERROR\n\tjob ${SAMPLE_JOBS_ARRAY[$job]} not submitted successfully!"
- exit 1;
- fi
- if test "$state" != "$proxy_state" ; then
-			echo -e "ERROR\n\tjob $job (${SAMPLE_JOBS_ARRAY[$job]}) records on lbproxy and bkserver differ!"
-# exit 1;
- fi
- SAMPLE_JOBS_STATES[$job]=$state
-
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
- [ $DEBUG -gt 1 ] && {
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- echo ${SAMPLE_JOBS_ARRAY[$job]}
- job=$(($job + 1))
- done
- }
-}
-
-# Test that logs random set of events (for registered jobs) to lbproxy
-# and checks the state in lbproxy
-# and measures the time it takes the state to propagate to bkserver
-#
-test_logging_events()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Logging events to the lbproxy\t\t"
- st_count=`echo $STATES | wc -w`
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- tmp=`echo $RANDOM % $st_count + 1 | bc`
- state=`echo $STATES | cut -d " " -f $tmp | tr A-Z a-z`
-
- source glite-lb-$state.sh $LARGE_STRESS -X $TEST_LBPROXY_STORE_SOCK -m $BKSERVER_HOST -j ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 1>/dev/null
- [ $? -ne 0 ] && echo -e "ERROR\n\tglite-lb-$state.sh ${SAMPLE_JOBS_ARRAY[$job]} error!"
- proxy_state=`$JOBSTAT -x $TEST_LBPROXY_SERVE_SOCK ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
- purged=`echo $LBPROXY_PURGE_STATES | grep $state`
- bkserver_state=`$JOBSTAT ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-
- if test -n "$purged" ; then
- echo $proxy_state | grep "No such file or directory"
- if test $? -eq 0 ; then
- echo -e "ERROR\n\tJob ${SAMPLE_JOBS_ARRAY[$job]} was not purged out from LBProxy!"
- exit 1;
- fi
- fi
- if test -z "$purged" ; then
- if test "$state" != "$proxy_state" ; then
-				echo -e "ERROR\n\tevents for job ${SAMPLE_JOBS_ARRAY[$job]} were not logged successfully!"
- exit 1;
- fi
- fi
-
- response=0
- while [ "$state" != "$bkserver_state" ] ; do
- bkserver_state=`$JOBSTAT ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
- [ $DEBUG -gt 0 ] && echo -n "."
- sleep $timeout
- response=$(($response + $timeout ))
- if test $response -gt $maxtimeout ; then
- echo -e "ERROR\n\tstatus of job ${SAMPLE_JOBS_ARRAY[$job]} as queried from bkserver ($bkserver_state) has not become $state for more than $response seconds!"
- exit 1;
- fi
- done
-
- SAMPLE_JOBS_STATES[$job]=$state
- SAMPLE_JOBS_RESPONSES[$job]=$response
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
- [ $DEBUG -gt 1 ] && {
- job=0
- echo "Polling the bkserver took for individual jobs the following time"
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- echo -e "${SAMPLE_JOBS_ARRAY[$job]} (${SAMPLE_JOBS_STATES[$job]})\t${SAMPLE_JOBS_RESPONSES[$job]} seconds"
- job=$(($job + 1))
- done
- }
-}
-
-
-#
-# shell starting code
-
-# without parameters show help message
-# test -z "$1" && show_help
-
-while test -n "$1"
-do
- case "$1" in
- "-h" | "--help") show_help && exit 0 ;;
- "-x" | "--proxy-sockpath-pref")
- shift
- export TEST_LBPROXY_STORE_SOCK=$1store.sock
- export TEST_LBPROXY_SERVE_SOCK=$1serve.sock
- ;;
- "-m" | "--bkserver") shift ; BKSERVER_HOST=$1 ;;
- "-j" | "--jobs-count") shift; JOBS_ARRAY_SIZE=$1 ;;
- "-s" | "--states") shift; STATES="$1" ;;
- "-p" | "--proxy-purge-states") shift; LBPROXY_PURGE_STATES="$1" ;;
- "-l" | "--large-stress") shift ; LARGE_STRESS="-l $1" ;;
- "-g" | "--log") shift ; logfile=$1 ;;
-
- *) echo "Unrecognized option $1" ;;
-
- esac
- shift
-done
-
-if test -n "$logfile" ; then
- LOGFD=3
- exec 3>$logfile
-fi
-
-
-echo "STATES = $STATES"
-echo "LBPROXY_PURGE_STATES = $LBPROXY_PURGE_STATES"
-
-check_utils
-
-test_gen_sample_jobs
-test_logging_events
-
-db_clear_jobs
+++ /dev/null
-#!/bin/sh
-
-# XXX: add path to the stage area
-PATH=/home/michal/shared/egee/jra1/stage/bin:/home/michal/shared/egee/jra1/stage/examples:$PATH
-
-#set -x
-
-# Binaries
-LOGEV=${LOGEV:-glite-lb-logevent}
-JOBLOG=${JOBLOG:-glite-lb-job_log}
-JOBREG=${JOBREG:-glite-lb-job_reg}
-USERJOBS=${USERJOBS:-glite-lb-user_jobs}
-JOBSTAT=${JOBSTAT:-glite-lb-job_status}
-PURGE=${PURGE:-glite-lb-purge}
-
-# -m host
-BKSERVER_HOST=${BKSERVER_HOST:-`hostname -f`:9000}
-TEST_LBPROXY_STORE_SOCK=${EDG_WL_LBPROXY_STORE_SOCK:-/tmp/lb_proxy_store.sock}
-TEST_LBPROXY_SERVE_SOCK=${EDG_WL_LBPROXY_SERVE_SOCK:-/tmp/lb_proxy_serve.sock}
-
-STATES="aborted cancelled done ready running scheduled waiting"
-LBPROXY_PURGE_STATES="cleared done aborted cancelled"
-JOBS_ARRAY_SIZE=10
-SAMPLE_JOBS_ARRAY[0]=
-SAMPLE_JOBS_STATES[0]=
-SAMPLE_JOBS_RESPONSES[0]=
-
-# some defaults
-DEBUG=2
-LOGFD=${LOGFD:-1}
-LARGE_STRESS=${LARGE_STRESS:-}
-
-# timeouts for polling the bkserver
-timeout=10
-maxtimeout=300
-
-#
-# Procedures
-#
-
-# print help message
-show_help()
-{
- echo "Usage: $0 [OPTIONS] "
- echo "Options:"
- echo " -h | --help Show this help message."
- echo " -x | --proxy-sockpath-pref LBProxy socket path prefix."
- echo " -j | --jobs-count Count of test(ed) jobs."
-	echo " -s | --states              List of states into which tested jobs may fall."
-	echo " -p | --proxy-purge-states  List of states in which LBProxy purges the job."
-	echo " -l | --large-stress 'size' Do a large stress logging ('size' random data added to the messages)."
- echo " -g | --log 'logfile' Redirect all output to the 'logfile'."
- echo ""
- echo "For proper operation check your grid-proxy-info"
- grid-proxy-info
-}
-
-get_time()
-{
- sec=`date +%s`
- nsec=`date +%N`
- time=`echo "1000000000*$sec + $nsec"|bc`
-# time=$sec
- return 0
-}
-
-check_exec()
-{
- [ $DEBUG -gt 0 ] && [ -n "$2" ] && echo -n -e "$2\t" || echo -n -e "$1\t"
- eval $1
- RV=$?
- [ $DEBUG -gt 0 ] && [ $RV -eq 0 ] && echo "OK" || echo "FAILED"
- return $RV
-}
-
-# check for existence of needed executable(s)
-check_utils()
-{
-	check_exec 'JOBREG=`which $JOBREG`' "Checking $JOBREG utility" || exit 1
-	check_exec 'JOBLOG=`which $JOBLOG`' "Checking $JOBLOG utility" || exit 1
-	check_exec 'LOGEV=`which $LOGEV`' "Checking $LOGEV utility" || exit 1
-	check_exec 'USERJOBS=`which $USERJOBS`' "Checking $USERJOBS utility" || exit 1
-	check_exec 'JOBSTAT=`which $JOBSTAT`' "Checking $JOBSTAT utility" || exit 1
-}
-
-log_ev()
-{
-# $LOGEV -j $EDG_JOBID -s NetworkServer -n $1 -e UserTag --name color --value red
- [ $DEBUG -gt 2 ] && echo "$LOGEV -j \"$EDG_JOBID\" -s UserInterface -c \"$EDG_WL_SEQUENCE\" $@"
- EDG_WL_SEQUENCE=`$LOGEV $LARGE_STRESS -j $EDG_JOBID -s UserInterface -c $EDG_WL_SEQUENCE "$@"`
- test $? -ne 0 -o -z "$EDG_WL_SEQUENCE" && echo "missing EDG_WL_SEQUENCE from $LOGEV"
-}
-
-log_ev_proxy()
-{
-# $LOGEV -x -j $EDG_JOBID -s NetworkServer -n $1 -e UserTag --name color --value red
-
- [ $DEBUG -gt 2 ] && echo "$LOGEV -x -j \"$EDG_JOBID\" -s UserInterface -c \"$EDG_WL_SEQUENCE\" $@"
- EDG_WL_SEQUENCE=`$LOGEV -x $LARGE_STRESS -j $EDG_JOBID -s UserInterface -c $EDG_WL_SEQUENCE "$@"`
- test $? -ne 0 -o -z "$EDG_WL_SEQUENCE" && echo "missing EDG_WL_SEQUENCE from $LOGEV"
-}
-
-purge()
-{
- [ $DEBUG -gt 2 ] && echo "$PURGE -a 0 -c 0 -n 0 -o 0 $@"
- $PURGE -a 0 -c 0 -n 0 -o 0 "$@"
-}
-
-purge_proxy()
-{
- [ $DEBUG -gt 2 ] && echo "$PURGE -x -a 0 -c 0 -n 0 -o 0 $@"
- $PURGE -x -a 0 -c 0 -n 0 -o 0 "$@"
-}
-
-
-db_clear_jobs()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Purging test jobs from db\t\t"
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- LARGE_STRESS=""
- EDG_WL_SEQUENCE="UI=999999:NS=9999999999:WM=999999:BH=9999999999:JSS=999999:LM=999999:LRMS=999999:APP=999999"
-# log_ev_proxy -e Clear --reason=PurgingDB
-# purge_proxy
-# log_ev -e Clear --reason=PurgingDB
-# purge
-
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
-}
-
-# Test that registers jobs
-# and checks against lbproxy and bkserver
-#
-test_gen_sample_jobs()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Registering sample jobs\t\t\t"
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
-# eval `$JOBREG -x -m $BKSERVER_HOST -s UserInterface 2>&1 | tail -n 2`
- TMP=`$JOBREG -x -m $BKSERVER_HOST -s UserInterface 2>&1`
- [ $? -ne 0 ] && echo -e "ERROR\n\t$JOBREG error!"
- eval `echo "$TMP" | tail -n 2`
- if test -z "$EDG_JOBID" ; then
- echo "test_gen_sample_jobs: $JOBREG failed"
- else
- SAMPLE_JOBS_ARRAY[$job]=$EDG_JOBID
- fi
-
-# state=`$JOBSTAT $EDG_JOBID 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# proxy_state=`$JOBSTAT -x $TEST_LBPROXY_SERVE_SOCK $EDG_JOBID 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# if test "$state" != "submitted" ; then
-# echo -e "ERROR\n\tjob ${SAMPLE_JOBS_ARRAY[$job]} not submitted succesfully!"
-# fi
-# if test "$state" != "$proxy_state" ; then
-# echo -e "ERROR\n\tjob (${SAMPLE_JOBS_ARRAY[$job]}) records on lbproxy and bkserver differs!"
-# fi
-# SAMPLE_JOBS_STATES[$job]=$state
- echo -n "."
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
-# [ $DEBUG -gt 1 ] && {
-# job=0
-# while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
-# echo ${SAMPLE_JOBS_ARRAY[$job]}
-# job=$(($job + 1))
-# done
-# }
-}
-
-# Test that logs random set of events (for registered jobs) to lbproxy
-# and checks the state in lbproxy
-# and measures the time it takes the state to propagate to bkserver
-#
-test_logging_events()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Logging events to the lbproxy\t\t"
- st_count=`echo $STATES | wc -w`
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- echo -n "."
- if test -z "${SAMPLE_JOBS_ARRAY[$job]}" ; then
- job=$(($job + 1))
- continue
- fi
-# tmp=`echo $RANDOM % $st_count + 1 | bc`
-# state=`echo $STATES | cut -d " " -f $tmp | tr A-Z a-z`
- get_time
- start=$time
-
-# source glite-lb-$state.sh $LARGE_STRESS -X $TEST_LBPROXY_STORE_SOCK -m $BKSERVER_HOST -j ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 1>/dev/null
-# [ $? -ne 0 ] && echo -e "ERROR\n\tglite-lb-$state.sh ${SAMPLE_JOBS_ARRAY[$job]} error!"
- log_ev_proxy -n 100 -e UserTag --tag=color --value=red
-
-# proxy_state=`$JOBSTAT -x $TEST_LBPROXY_SERVE_SOCK ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# purged=`echo $LBPROXY_PURGE_STATES | grep $state`
-# bkserver_state=`$JOBSTAT ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-#
-# if test -n "$purged" ; then
-# echo $proxy_state | grep "No such file or directory"
-# if test $? -eq 0 ; then
-# echo -e "ERROR\n\tJob ${SAMPLE_JOBS_ARRAY[$job]} was not purged out from LBProxy!"
-# exit 1;
-# fi
-# fi
-# if test -z "$purged" ; then
-# if test "$state" != "$proxy_state" ; then
-# echo -e "ERROR\n\tevents for job ${SAMPLE_JOBS_ARRAY[$job]} were not logged succesfully!"
-# exit 1;
-# fi
-# fi
-
-# response=0
-# while [ "$state" != "$bkserver_state" ] ; do
-# bkserver_state=`$JOBSTAT ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# [ $DEBUG -gt 0 ] && echo -n "."
-# sleep $timeout
-# response=$(($response + $timeout ))
-# if test $response -gt $maxtimeout ; then
-# echo -e "ERROR\n\tstatus of job ${SAMPLE_JOBS_ARRAY[$job]} as queried from bkserver ($bkserver_state) has not become $state for more than $response seconds!"
-# exit 1;
-# fi
-# done
-#
-# SAMPLE_JOBS_STATES[$job]=$state
- get_time
- response=`echo "scale=9; ($time - $start)/1000000000"|bc`
- SAMPLE_JOBS_RESPONSES[$job]=$response
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
- [ $DEBUG -gt 1 ] && {
- job=0
- total=0
-# echo "Sending events took for individual jobs the following time"
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- total=`echo "scale=9; $total + ${SAMPLE_JOBS_RESPONSES[$job]}" |bc`
-# echo -e "${SAMPLE_JOBS_ARRAY[$job]} \t${SAMPLE_JOBS_RESPONSES[$job]} seconds"
- job=$(($job + 1))
- done
- echo -e "Total time for $JOBS_ARRAY_SIZE jobs: \t$total"
- echo -e -n "Average time for job: \t"
- echo "scale=9; $total / $JOBS_ARRAY_SIZE"|bc
- echo -e -n "Job throughput (jobs/sec): \t"
- echo "scale=9; $JOBS_ARRAY_SIZE / $total"|bc
-
- }
-}
-
-
-#
-# shell starting code
-
-# without parameters show help message
-# test -z "$1" && show_help
-
-while test -n "$1"
-do
- case "$1" in
- "-h" | "--help") show_help && exit 0 ;;
- "-x" | "--proxy-sockpath-pref")
- shift
- export TEST_LBPROXY_STORE_SOCK=$1store.sock
- export TEST_LBPROXY_SERVE_SOCK=$1serve.sock
- ;;
- "-m" | "--bkserver") shift ; BKSERVER_HOST=$1 ;;
- "-j" | "--jobs-count") shift; JOBS_ARRAY_SIZE=$1 ;;
- "-s" | "--states") shift; STATES="$1" ;;
- "-p" | "--proxy-purge-states") shift; LBPROXY_PURGE_STATES="$1" ;;
- "-l" | "--large-stress") shift ; LARGE_STRESS="-l $1" ;;
- "-g" | "--log") shift ; logfile=$1 ;;
-
- *) echo "Unrecognized option $1" ;;
-
- esac
- shift
-done
-
-if test -n "$logfile" ; then
- LOGFD=3
- exec 3>$logfile
-fi
-
-
-echo "STATES = $STATES"
-echo "LBPROXY_PURGE_STATES = $LBPROXY_PURGE_STATES"
-
-check_utils
-
-test_gen_sample_jobs
-test_logging_events
-
-db_clear_jobs
+++ /dev/null
-#!/bin/sh
-
-# XXX: add path to the stage area
-PATH=/home/michal/shared/egee/jra1/stage/bin:/home/michal/shared/egee/jra1/stage/examples:$PATH
-
-#set -x
-
-# Binaries
-LOGEV=${LOGEV:-glite-lb-logevent}
-JOBLOG=${JOBLOG:-glite-lb-job_log}
-JOBREG=${JOBREG:-glite-lb-job_reg}
-USERJOBS=${USERJOBS:-glite-lb-user_jobs}
-JOBSTAT=${JOBSTAT:-glite-lb-job_status}
-PURGE=${PURGE:-glite-lb-purge}
-
-# -m host
-BKSERVER_HOST=${BKSERVER_HOST:-`hostname -f`:9000}
-TEST_LBPROXY_STORE_SOCK=${EDG_WL_LBPROXY_STORE_SOCK:-/tmp/lb_proxy_store.sock}
-TEST_LBPROXY_SERVE_SOCK=${EDG_WL_LBPROXY_SERVE_SOCK:-/tmp/lb_proxy_serve.sock}
-
-STATES="aborted cancelled done ready running scheduled waiting"
-LBPROXY_PURGE_STATES="cleared done aborted cancelled"
-JOBS_ARRAY_SIZE=10
-EVENT_NUMBER=50
-SAMPLE_JOBS_ARRAY[0]=
-SAMPLE_JOBS_STATES[0]=
-SAMPLE_JOBS_RESPONSES[0]=
-
-# some defaults
-DEBUG=2
-LOGFD=${LOGFD:-1}
-LARGE_STRESS=${LARGE_STRESS:-}
-
-# timeouts for polling the bkserver
-timeout=10
-maxtimeout=300
-
-#
-# Procedures
-#
-
-# print help message
-show_help()
-{
- echo "Usage: $0 [OPTIONS] "
- echo "Options:"
- echo " -h | --help Show this help message."
- echo " -x | --proxy-sockpath-pref LBProxy socket path prefix."
- echo " -j | --jobs-count Count of test(ed) jobs."
- echo " -n | --event-count Number of events per job."
-	echo " -s | --states              List of states into which tested jobs may fall."
-	echo " -p | --proxy-purge-states  List of states in which LBProxy purges the job."
-	echo " -l | --large-stress 'size' Do a large stress logging ('size' random data added to the messages)."
- echo " -g | --log 'logfile' Redirect all output to the 'logfile'."
- echo ""
- echo "For proper operation check your grid-proxy-info"
- grid-proxy-info
-}
-
-get_time()
-{
- sec=`date +%s`
- nsec=`date +%N`
- time=`echo "1000000000*$sec + $nsec"|bc`
-# time=$sec
- return 0
-}
-
-check_exec()
-{
- [ $DEBUG -gt 0 ] && [ -n "$2" ] && echo -n -e "$2\t" || echo -n -e "$1\t"
- eval $1
- RV=$?
- [ $DEBUG -gt 0 ] && [ $RV -eq 0 ] && echo "OK" || echo "FAILED"
- return $RV
-}
-
-# check for existence of needed executable(s)
-check_utils()
-{
-	check_exec 'JOBREG=`which $JOBREG`' "Checking $JOBREG utility" || exit 1
-	check_exec 'JOBLOG=`which $JOBLOG`' "Checking $JOBLOG utility" || exit 1
-	check_exec 'LOGEV=`which $LOGEV`' "Checking $LOGEV utility" || exit 1
-	check_exec 'USERJOBS=`which $USERJOBS`' "Checking $USERJOBS utility" || exit 1
-	check_exec 'JOBSTAT=`which $JOBSTAT`' "Checking $JOBSTAT utility" || exit 1
-}
-
-log_ev()
-{
-# $LOGEV -j $EDG_JOBID -s NetworkServer -n $1 -e UserTag --name color --value red
- [ $DEBUG -gt 2 ] && echo "$LOGEV -j \"$EDG_JOBID\" -s UserInterface -c \"$EDG_WL_SEQUENCE\" $@"
- EDG_WL_SEQUENCE=`$LOGEV $LARGE_STRESS -j $EDG_JOBID -s UserInterface -c $EDG_WL_SEQUENCE "$@"`
- test $? -ne 0 -o -z "$EDG_WL_SEQUENCE" && echo "missing EDG_WL_SEQUENCE from $LOGEV"
-}
-
-log_ev_proxy()
-{
-# $LOGEV -x -j $EDG_JOBID -s NetworkServer -n $1 -e UserTag --name color --value red
-
- [ $DEBUG -gt 2 ] && echo "$LOGEV -x -j \"$EDG_JOBID\" -s UserInterface -c \"$EDG_WL_SEQUENCE\" $@"
- EDG_WL_SEQUENCE=`$LOGEV -x $LARGE_STRESS -j $EDG_JOBID -s UserInterface -c $EDG_WL_SEQUENCE "$@"`
- test $? -ne 0 -o -z "$EDG_WL_SEQUENCE" && echo "missing EDG_WL_SEQUENCE from $LOGEV"
-}
-
-purge()
-{
- [ $DEBUG -gt 2 ] && echo "$PURGE -a 0 -c 0 -n 0 -o 0 $@"
- $PURGE -a 0 -c 0 -n 0 -o 0 "$@"
-}
-
-purge_proxy()
-{
- [ $DEBUG -gt 2 ] && echo "$PURGE -x -a 0 -c 0 -n 0 -o 0 $@"
- $PURGE -x -a 0 -c 0 -n 0 -o 0 "$@"
-}
-
-
-db_clear_jobs()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Purging test jobs from db\t\t"
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- LARGE_STRESS=""
- EDG_WL_SEQUENCE="UI=999999:NS=9999999999:WM=999999:BH=9999999999:JSS=999999:LM=999999:LRMS=999999:APP=999999"
-# log_ev_proxy -e Clear --reason=PurgingDB
-# purge_proxy
-# log_ev -e Clear --reason=PurgingDB
-# purge
-
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
-}
-
-# Test that registers jobs
-# and checks against lbproxy and bkserver
-#
-test_gen_sample_jobs()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Registering sample jobs\t\t\t"
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
-# eval `$JOBREG -x -m $BKSERVER_HOST -s UserInterface 2>&1 | tail -n 2`
- TMP=`$JOBREG -x -m $BKSERVER_HOST -s UserInterface 2>&1`
- [ $? -ne 0 ] && echo -e "ERROR\n\t$JOBREG error!"
- eval `echo "$TMP" | tail -n 2`
- if test -z "$EDG_JOBID" ; then
- echo "test_gen_sample_jobs: $JOBREG failed"
- else
- SAMPLE_JOBS_ARRAY[$job]=$EDG_JOBID
- fi
-
-# state=`$JOBSTAT $EDG_JOBID 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# proxy_state=`$JOBSTAT -x $TEST_LBPROXY_SERVE_SOCK $EDG_JOBID 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# if test "$state" != "submitted" ; then
-# echo -e "ERROR\n\tjob ${SAMPLE_JOBS_ARRAY[$job]} not submitted succesfully!"
-# fi
-# if test "$state" != "$proxy_state" ; then
-# echo -e "ERROR\n\tjob (${SAMPLE_JOBS_ARRAY[$job]}) records on lbproxy and bkserver differs!"
-# fi
-# SAMPLE_JOBS_STATES[$job]=$state
- echo -n "."
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
-# [ $DEBUG -gt 1 ] && {
-# job=0
-# while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
-# echo ${SAMPLE_JOBS_ARRAY[$job]}
-# job=$(($job + 1))
-# done
-# }
-}
-
-# Test that logs random set of events (for registered jobs) to lbproxy
-# and checks the state in lbproxy
-# and measures the time it takes the state to propagate to bkserver
-#
-test_logging_events()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Logging events to the lbproxy\t\t"
- st_count=`echo $STATES | wc -w`
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- echo -n "."
- if test -z "${SAMPLE_JOBS_ARRAY[$job]}" ; then
- job=$(($job + 1))
- continue
- fi
-# tmp=`echo $RANDOM % $st_count + 1 | bc`
-# state=`echo $STATES | cut -d " " -f $tmp | tr A-Z a-z`
- get_time
- start=$time
-
-# source glite-lb-$state.sh $LARGE_STRESS -X $TEST_LBPROXY_STORE_SOCK -m $BKSERVER_HOST -j ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 1>/dev/null
-# [ $? -ne 0 ] && echo -e "ERROR\n\tglite-lb-$state.sh ${SAMPLE_JOBS_ARRAY[$job]} error!"
- log_ev_proxy -n $EVENT_NUMBER -e UserTag --tag=color --value=red
-
-# proxy_state=`$JOBSTAT -x $TEST_LBPROXY_SERVE_SOCK ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# purged=`echo $LBPROXY_PURGE_STATES | grep $state`
-# bkserver_state=`$JOBSTAT ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-#
-# if test -n "$purged" ; then
-# echo $proxy_state | grep "No such file or directory"
-# if test $? -eq 0 ; then
-# echo -e "ERROR\n\tJob ${SAMPLE_JOBS_ARRAY[$job]} was not purged out from LBProxy!"
-# exit 1;
-# fi
-# fi
-# if test -z "$purged" ; then
-# if test "$state" != "$proxy_state" ; then
-# echo -e "ERROR\n\tevents for job ${SAMPLE_JOBS_ARRAY[$job]} were not logged succesfully!"
-# exit 1;
-# fi
-# fi
-
-# response=0
-# while [ "$state" != "$bkserver_state" ] ; do
-# bkserver_state=`$JOBSTAT ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# [ $DEBUG -gt 0 ] && echo -n "."
-# sleep $timeout
-# response=$(($response + $timeout ))
-# if test $response -gt $maxtimeout ; then
-# echo -e "ERROR\n\tstatus of job ${SAMPLE_JOBS_ARRAY[$job]} as queried from bkserver ($bkserver_state) has not become $state for more than $response seconds!"
-# exit 1;
-# fi
-# done
-#
-# SAMPLE_JOBS_STATES[$job]=$state
- get_time
- response=`echo "scale=9; ($time - $start)/1000000000"|bc`
- SAMPLE_JOBS_RESPONSES[$job]=$response
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
- [ $DEBUG -gt 1 ] && {
- job=0
- total=0
-# echo "Sending events took for individual jobs the following time"
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- total=`echo "scale=9; $total + ${SAMPLE_JOBS_RESPONSES[$job]}" |bc`
-# echo -e "${SAMPLE_JOBS_ARRAY[$job]} \t${SAMPLE_JOBS_RESPONSES[$job]} seconds"
- job=$(($job + 1))
- done
- echo -e "Total time for $JOBS_ARRAY_SIZE jobs: \t$total"
- echo -e -n "Average time for event: \t"
- echo "scale=9; $total / $JOBS_ARRAY_SIZE / $EVENT_NUMBER"|bc
- echo -e -n "Event throughput (events/sec): \t"
- echo "scale=9; $EVENT_NUMBER * $JOBS_ARRAY_SIZE / $total"|bc
-
- }
-}
-
-
-#
-# shell starting code
-
-# without parameters show help message
-# test -z "$1" && show_help
-
-while test -n "$1"
-do
- case "$1" in
- "-h" | "--help") show_help && exit 0 ;;
- "-x" | "--proxy-sockpath-pref")
- shift
- export TEST_LBPROXY_STORE_SOCK=$1store.sock
- export TEST_LBPROXY_SERVE_SOCK=$1serve.sock
- ;;
- "-m" | "--bkserver") shift ; BKSERVER_HOST=$1 ;;
- "-j" | "--jobs-count") shift; JOBS_ARRAY_SIZE=$1 ;;
- "-n" | "--event-count") shift; EVENT_NUMBER=$1 ;;
- "-s" | "--states") shift; STATES="$1" ;;
- "-p" | "--proxy-purge-states") shift; LBPROXY_PURGE_STATES="$1" ;;
- "-l" | "--large-stress") shift ; LARGE_STRESS="-l $1" ;;
- "-g" | "--log") shift ; logfile=$1 ;;
-
- *) echo "Unrecognized option $1" ;;
-
- esac
- shift
-done
-
-if test -n "$logfile" ; then
- LOGFD=3
- exec 3>$logfile
-fi
-
-
-echo "STATES = $STATES"
-echo "LBPROXY_PURGE_STATES = $LBPROXY_PURGE_STATES"
-
-check_utils
-
-test_gen_sample_jobs
-test_logging_events
-
-db_clear_jobs
+++ /dev/null
-#!/bin/sh
-
-# XXX: add path to the stage area
-PATH=/home/michal/shared/egee/jra1/stage/bin:/home/michal/shared/egee/jra1/stage/examples:$PATH
-
-#set -x
-
-# Binaries
-LOGEV=${LOGEV:-glite-lb-logevent}
-JOBLOG=${JOBLOG:-glite-lb-job_log}
-JOBREG=${JOBREG:-glite-lb-job_reg}
-USERJOBS=${USERJOBS:-glite-lb-user_jobs}
-JOBSTAT=${JOBSTAT:-glite-lb-job_status}
-PURGE=${PURGE:-glite-lb-purge}
-
-# -m host
-BKSERVER_HOST=${BKSERVER_HOST:-`hostname -f`:9000}
-TEST_LBPROXY_STORE_SOCK=${EDG_WL_LBPROXY_STORE_SOCK:-/tmp/lb_proxy_store.sock}
-TEST_LBPROXY_SERVE_SOCK=${EDG_WL_LBPROXY_SERVE_SOCK:-/tmp/lb_proxy_serve.sock}
-
-STATES="aborted cancelled done ready running scheduled waiting"
-LBPROXY_PURGE_STATES="cleared done aborted cancelled"
-JOBS_ARRAY_SIZE=10
-SAMPLE_JOBS_ARRAY[0]=
-SAMPLE_JOBS_STATES[0]=
-SAMPLE_JOBS_RESPONSES[0]=
-
-# some defaults
-DEBUG=2
-LOGFD=${LOGFD:-1}
-LARGE_STRESS=${LARGE_STRESS:-}
-
-# timeouts for polling the bkserver
-timeout=10
-maxtimeout=300
-
-#
-# Procedures
-#
-
-# print help message
-show_help()
-{
- echo "Usage: $0 [OPTIONS] "
- echo "Options:"
- echo " -h | --help Show this help message."
- echo " -x | --proxy-sockpath-pref LBProxy socket path prefix."
- echo " -j | --jobs-count Count of test(ed) jobs."
- echo " -n | --subjobs Number of subjobs."
-	echo " -s | --states              List of states into which tested jobs may fall."
-	echo " -p | --proxy-purge-states  List of states in which LBProxy purges the job."
-	echo " -l | --large-stress 'size' Do a large stress logging ('size' random data added to the messages)."
- echo " -g | --log 'logfile' Redirect all output to the 'logfile'."
- echo ""
- echo "For proper operation check your grid-proxy-info"
- grid-proxy-info
-}
-
-get_time()
-{
- sec=`date +%s`
- nsec=`date +%N`
- time=`echo "1000000000*$sec + $nsec"|bc`
-# time=$sec
- return 0
-}
-
-check_exec()
-{
- [ $DEBUG -gt 0 ] && [ -n "$2" ] && echo -n -e "$2\t" || echo -n -e "$1\t"
- eval $1
- RV=$?
- [ $DEBUG -gt 0 ] && [ $RV -eq 0 ] && echo "OK" || echo "FAILED"
- return $RV
-}
-
-# check for existence of needed executable(s)
-check_utils()
-{
-	check_exec 'JOBREG=`which $JOBREG`' "Checking $JOBREG utility" || exit 1
-	check_exec 'JOBLOG=`which $JOBLOG`' "Checking $JOBLOG utility" || exit 1
-	check_exec 'LOGEV=`which $LOGEV`' "Checking $LOGEV utility" || exit 1
-	check_exec 'USERJOBS=`which $USERJOBS`' "Checking $USERJOBS utility" || exit 1
-	check_exec 'JOBSTAT=`which $JOBSTAT`' "Checking $JOBSTAT utility" || exit 1
-}
-
-log_ev()
-{
-# $LOGEV -j $EDG_JOBID -s NetworkServer -n $1 -e UserTag --name color --value red
- [ $DEBUG -gt 2 ] && echo "$LOGEV -j \"$EDG_JOBID\" -s UserInterface -c \"$EDG_WL_SEQUENCE\" $@"
- EDG_WL_SEQUENCE=`$LOGEV $LARGE_STRESS -j $EDG_JOBID -s UserInterface -c $EDG_WL_SEQUENCE "$@"`
- test $? -ne 0 -o -z "$EDG_WL_SEQUENCE" && echo "missing EDG_WL_SEQUENCE from $LOGEV"
-}
-
-log_ev_proxy()
-{
-# $LOGEV -x -j $EDG_JOBID -s NetworkServer -n $1 -e UserTag --name color --value red
-
- [ $DEBUG -gt 2 ] && echo "$LOGEV -x -j \"$EDG_JOBID\" -s UserInterface -c \"$EDG_WL_SEQUENCE\" $@"
- EDG_WL_SEQUENCE=`$LOGEV -x $LARGE_STRESS -j $EDG_JOBID -s UserInterface -c $EDG_WL_SEQUENCE "$@"`
- test $? -ne 0 -o -z "$EDG_WL_SEQUENCE" && echo "missing EDG_WL_SEQUENCE from $LOGEV"
-}
-
-purge()
-{
- [ $DEBUG -gt 2 ] && echo "$PURGE -a 0 -c 0 -n 0 -o 0 $@"
- $PURGE -a 0 -c 0 -n 0 -o 0 "$@"
-}
-
-purge_proxy()
-{
- [ $DEBUG -gt 2 ] && echo "$PURGE -x -a 0 -c 0 -n 0 -o 0 $@"
- $PURGE -x -a 0 -c 0 -n 0 -o 0 "$@"
-}
-
-
-db_clear_jobs()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Purging test jobs from db\t\t"
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- LARGE_STRESS=""
- EDG_WL_SEQUENCE="UI=999999:NS=9999999999:WM=999999:BH=9999999999:JSS=999999:LM=999999:LRMS=999999:APP=999999"
-# log_ev_proxy -e Clear --reason=PurgingDB
-# purge_proxy
-# log_ev -e Clear --reason=PurgingDB
-# purge
-
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
-}
-
-# Test that registers jobs
-# and checks against lbproxy and bkserver
-#
-test_gen_sample_jobs()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Registering sample jobs\t\t\t"
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
-# eval `$JOBREG -x -m $BKSERVER_HOST -s UserInterface 2>&1 | tail -n 2`
- get_time
- start=$time
-		if [[ -z $SUBJOBS ]] ; then
-			TMP=`$JOBREG -x -m $BKSERVER_HOST -s UserInterface 2>&1`
-			[ $? -ne 0 ] && echo -e "ERROR\n\t$JOBREG error!"
-			get_time
-			eval `echo "$TMP" | tail -n 2`
-		else
-			TMP=`$JOBREG -x -m $BKSERVER_HOST -s UserInterface -n $SUBJOBS 2>&1`
-			[ $? -ne 0 ] && echo -e "ERROR\n\t$JOBREG error!"
-			get_time
-			eval `echo "$TMP" | grep DAG_JOBID`
-			EDG_JOBID=$EDG_WL_DAG_JOBID
-		fi
- if test -z "$EDG_JOBID" ; then
- echo "test_gen_sample_jobs: $JOBREG failed"
- else
- SAMPLE_JOBS_ARRAY[$job]=$EDG_JOBID
- response=`echo "scale=9; ($time - $start)/1000000000"|bc`
- SAMPLE_JOBS_RESPONSES[$job]=$response
- fi
-
-# state=`$JOBSTAT $EDG_JOBID 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# proxy_state=`$JOBSTAT -x $TEST_LBPROXY_SERVE_SOCK $EDG_JOBID 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# if test "$state" != "submitted" ; then
-# echo -e "ERROR\n\tjob ${SAMPLE_JOBS_ARRAY[$job]} not submitted succesfully!"
-# fi
-# if test "$state" != "$proxy_state" ; then
-# echo -e "ERROR\n\tjob (${SAMPLE_JOBS_ARRAY[$job]}) records on lbproxy and bkserver differs!"
-# fi
-# SAMPLE_JOBS_STATES[$job]=$state
- echo -n "."
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
- [ $DEBUG -gt 1 ] && {
- job=0
- total=0
-# echo "Registration took for individual jobs the following time"
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- total=`echo "scale=9; $total + ${SAMPLE_JOBS_RESPONSES[$job]}" |bc`
-# echo -e "${SAMPLE_JOBS_ARRAY[$job]} \t${SAMPLE_JOBS_RESPONSES[$job]} seconds"
- job=$(($job + 1))
- done
- echo "Registration results:"
- echo -e "Total time for $JOBS_ARRAY_SIZE jobs with $SUBJOBS subjobs: \t$total"
- echo -e -n "Average time for registration: \t"
- echo "scale=9; $total / $JOBS_ARRAY_SIZE / $SUBJOBS"|bc
- echo -e -n "Registration throughput (jobs/sec): \t"
- echo "scale=9; $SUBJOBS * $JOBS_ARRAY_SIZE / $total"|bc
-
- }
-# [ $DEBUG -gt 1 ] && {
-# job=0
-# while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
-# echo ${SAMPLE_JOBS_ARRAY[$job]}
-# job=$(($job + 1))
-# done
-# }
-}
-
-# Test that logs random set of events (for registered jobs) to lbproxy
-# and checks the state in lbproxy
-# and measures the time it takes the state to propagate to bkserver
-#
-test_logging_events()
-{
- [ $DEBUG -gt 0 ] && echo -n -e "Logging events to the lbproxy\t\t"
- st_count=`echo $STATES | wc -w`
- job=0
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- echo -n "."
- if test -z "${SAMPLE_JOBS_ARRAY[$job]}" ; then
- job=$(($job + 1))
- continue
- fi
-# tmp=`echo $RANDOM % $st_count + 1 | bc`
-# state=`echo $STATES | cut -d " " -f $tmp | tr A-Z a-z`
- get_time
- start=$time
-
-# source glite-lb-$state.sh $LARGE_STRESS -X $TEST_LBPROXY_STORE_SOCK -m $BKSERVER_HOST -j ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 1>/dev/null
-# [ $? -ne 0 ] && echo -e "ERROR\n\tglite-lb-$state.sh ${SAMPLE_JOBS_ARRAY[$job]} error!"
- log_ev_proxy -n 100 -e UserTag --tag=color --value=red
-
-# proxy_state=`$JOBSTAT -x $TEST_LBPROXY_SERVE_SOCK ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# purged=`echo $LBPROXY_PURGE_STATES | grep $state`
-# bkserver_state=`$JOBSTAT ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-#
-# if test -n "$purged" ; then
-# echo $proxy_state | grep "No such file or directory"
-# if test $? -eq 0 ; then
-# echo -e "ERROR\n\tJob ${SAMPLE_JOBS_ARRAY[$job]} was not purged out from LBProxy!"
-# exit 1;
-# fi
-# fi
-# if test -z "$purged" ; then
-# if test "$state" != "$proxy_state" ; then
-# echo -e "ERROR\n\tevents for job ${SAMPLE_JOBS_ARRAY[$job]} were not logged succesfully!"
-# exit 1;
-# fi
-# fi
-
-# response=0
-# while [ "$state" != "$bkserver_state" ] ; do
-# bkserver_state=`$JOBSTAT ${SAMPLE_JOBS_ARRAY[$job]} 2>&1 | grep "state :" | cut -d " " -f 3 | tr A-Z a-z`
-# [ $DEBUG -gt 0 ] && echo -n "."
-# sleep $timeout
-# response=$(($response + $timeout ))
-# if test $response -gt $maxtimeout ; then
-# echo -e "ERROR\n\tstatus of job ${SAMPLE_JOBS_ARRAY[$job]} as queried from bkserver ($bkserver_state) has not become $state for more than $response seconds!"
-# exit 1;
-# fi
-# done
-#
-# SAMPLE_JOBS_STATES[$job]=$state
- get_time
- response=`echo "scale=9; ($time - $start)/1000000000"|bc`
- SAMPLE_JOBS_RESPONSES[$job]=$response
- job=$(($job + 1))
- done
- [ $DEBUG -gt 0 ] && echo "OK"
- [ $DEBUG -gt 1 ] && {
- job=0
- total=0
-# echo "Sending events took for individual jobs the following time"
- while [ $job -lt $JOBS_ARRAY_SIZE ] ; do
- total=`echo "scale=9; $total + ${SAMPLE_JOBS_RESPONSES[$job]}" |bc`
-# echo -e "${SAMPLE_JOBS_ARRAY[$job]} \t${SAMPLE_JOBS_RESPONSES[$job]} seconds"
- job=$(($job + 1))
- done
- echo -e "Total time for $JOBS_ARRAY_SIZE jobs: \t$total"
- echo -e -n "Average time for job: \t"
- echo "scale=9; $total / $JOBS_ARRAY_SIZE"|bc
- echo -e -n "Job throughput (jobs/sec): \t"
- echo "scale=9; $JOBS_ARRAY_SIZE / $total"|bc
-
- }
-}
-
-
-#
-# shell starting code
-
-# without parameters show help message
-# test -z "$1" && show_help
-
-while test -n "$1"
-do
- case "$1" in
- "-h" | "--help") show_help && exit 0 ;;
- "-x" | "--proxy-sockpath-pref")
- shift
- export TEST_LBPROXY_STORE_SOCK=$1store.sock
- export TEST_LBPROXY_SERVE_SOCK=$1serve.sock
- ;;
- "-m" | "--bkserver") shift ; BKSERVER_HOST=$1 ;;
- "-j" | "--jobs-count") shift; JOBS_ARRAY_SIZE=$1 ;;
- "-n" | "--subjobs") shift; SUBJOBS="$1" ;;
- "-s" | "--states") shift; STATES="$1" ;;
- "-p" | "--proxy-purge-states") shift; LBPROXY_PURGE_STATES="$1" ;;
- "-l" | "--large-stress") shift ; LARGE_STRESS="-l $1" ;;
- "-g" | "--log") shift ; logfile=$1 ;;
-
- *) echo "Unrecognized option $1" ;;
-
- esac
- shift
-done
-
-if test -n "$logfile" ; then
- LOGFD=3
- exec 3>$logfile
-fi
-
-
-echo "STATES = $STATES"
-echo "LBPROXY_PURGE_STATES = $LBPROXY_PURGE_STATES"
-
-check_utils
-
-test_gen_sample_jobs
-#test_logging_events
-
-db_clear_jobs
+++ /dev/null
-#Mon Apr 03 07:51:54 CEST 2006
-module.build=0099
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Configuration options for the GLite LB Proxy module
-
- Authors: Jiri Skrabal <nykolas@ics.muni.cz>
-
- Revision history:
-
-
--->
-
- <!-- ======================================================
- Define extra properties here ...
- ====================================================== -->
-
- <project name="LB Proxy configuration options">
- <target name="lbmakefiles">
- <exec executable="ln" failonerror="true">
- <arg line="-fs ${component.dir}/Makefile ${module.build.dir}/Makefile"/>
- </exec>
- <echo file="${module.build.dir}/Makefile.inc">
-top_srcdir=..
-builddir=build
-stagedir=${stage.abs.dir}
-distdir=${dist.dir}
-globalprefix=${global.prefix}
-lbprefix=${subsystem.prefix}
-package=${module.package.name}
-PREFIX=${install.dir}
-version=${module.version}
-glite_location=${with.glite.location}
-globus_prefix=${with.globus.prefix}
-expat_prefix=${with.expat.prefix}
-mysql_prefix=${with.mysql.prefix}
-mysql_version=${ext.mysql.version}
-gridsite_prefix=${with.gridsite.prefix}
-gsoap_prefix=${with.gsoap.prefix}
-cppunit_prefix=${with.cppunit.prefix}
-thrflavour=${with.globus.thr.flavor}
-nothrflavour=${with.globus.nothr.flavor}
- </echo>
- </target>
- </project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Common build properties file for the Glite LB Proxy component
-
- Authors: Jiri Skrabal <nykolas@ics.muni.cz>
-
- Revision history:
-
--->
-
-<project name="LB Proxy component common properties">
-
- <!-- Include build properties to allow overwriting
- of properties for subsystem -->
- <property file="build.properties" />
-
- <!-- ======================================================
- Define corresponding subsystem properties
- ====================================================== -->
-
- <!-- Subsystem name -->
- <property name="subsystem.name" value="${lb.subsystem.name}"/>
-
- <!-- Subsystem prefix -->
- <property name="subsystem.prefix" value="${lb.subsystem.prefix}"/>
-
- <!-- ======================================================
- Define component properties
- ====================================================== -->
-
- <!-- Component name prefix -->
- <property name="component.prefix" value="proxy" />
-
- <!-- ======================================================
- Define general component properties
- ====================================================== -->
-
- <import file="${component.general.properties.file}" />
-
- <!-- ======================================================
- Define extra properties here ...
- ====================================================== -->
-
-
-</project>
+++ /dev/null
-tar_exclude
-CVS
-build.xml
-build
-build.properties
-properties.xml
-configure-options.xml
-.cvsignore
-.project
-.cdtproject
+++ /dev/null
-#Fri Sep 02 14:18:53 CEST 2005
-module.version=1.3.0
-module.age=0
+++ /dev/null
-#ident "$Header$"
-
-#include <stdio.h>
-
-#include "glite/wmsutils/jobid/cjobid.h"
-#include "glite/lb/producer.h"
-#include "glite/lb/jobstat.h"
-
-char* write2rgma_statline(edg_wll_JobStat *stat)
-{
- fputs("fake write2rgma_statline()\n",stderr);
- return NULL;
-}
-
-void write2rgma_status(edg_wll_JobStat *stat)
-{
-	fputs("fake write2rgma_status()\n",stderr);
-}
-
-void write2rgma_chgstatus(edg_wll_JobStat *stat, char *prev_statline)
-{
- fputs("fake write2rgma_chgstatus()\n",stderr);
-}
+++ /dev/null
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <getopt.h>
-#include <linux/limits.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <sys/wait.h>
-#include <sys/socket.h>
-#include <sys/uio.h>
-#include <sys/un.h>
-#include <netinet/in.h>
-#include <signal.h>
-#include <errno.h>
-#include <netdb.h>
-#include <limits.h>
-#include <syslog.h>
-#include <sys/time.h>
-#include <arpa/inet.h>
-#include <arpa/nameser.h>
-#include <resolv.h>
-#include <sys/ipc.h>
-#include <sys/sem.h>
-
-#include "glite/lb/srvbones.h"
-#include "glite/lb/context.h"
-#include "glite/lb/context-int.h"
-#ifdef LB_PERF
-#include "glite/lb/lb_perftest.h"
-#include "glite/lb/srv_perf.h"
-
-enum lb_srv_perf_sink sink_mode;
-#endif
-
-extern int edg_wll_DBCheckVersion(edg_wll_Context);
-extern edg_wll_ErrorCode edg_wll_Open(edg_wll_Context ctx, char *cs);
-extern edg_wll_ErrorCode edg_wll_Close(edg_wll_Context);
-extern int edg_wll_StoreProtoProxy(edg_wll_Context ctx);
-extern int edg_wll_ServerHTTP(edg_wll_Context ctx);
-
-extern char *lbproxy_ilog_socket_path;
-extern char *lbproxy_ilog_file_prefix;
-
-
-#define DEFAULTCS "lbserver/@localhost:lbproxy"
-
-#define CON_QUEUE 20 /* accept() */
-#define SLAVE_OVERLOAD 10 /* queue items per slave */
-#define IDLE_TIMEOUT 10 /* keep idle connection that many seconds */
-#define REQUEST_TIMEOUT 120 /* one client may ask one slave multiple times */
-#define SLAVE_CONNS_MAX 500 /* commit suicide after that many connections */
-
-/* file to store pid and generate semaphores key
- */
-#ifndef GLITE_LBPROXY_PIDFILE
-#define GLITE_LBPROXY_PIDFILE "/var/run/glite-lbproxy.pid"
-#endif
-
-#ifndef GLITE_LBPROXY_SOCK_PREFIX
-#define GLITE_LBPROXY_SOCK_PREFIX "/tmp/lb_proxy_"
-#endif
-
-#ifndef dprintf
-#define dprintf(x) { if (debug) printf x; }
-#endif
-
-#define sizofa(a) (sizeof(a)/sizeof((a)[0]))
-
-
-int debug = 0;
-static const int one = 1;
-static char *dbstring = NULL;
-static char sock_store[PATH_MAX],
- sock_serve[PATH_MAX];
-static int slaves = 10,
- semaphores = -1,
- con_queue = CON_QUEUE,
- semset;
-static char host[300];
-static char * port;
-
-
-static struct option opts[] = {
- {"port", 1, NULL, 'p'},
- {"con-queue", 1, NULL, 'c'},
- {"debug", 0, NULL, 'd'},
- {"silent", 0, NULL, 'z'},
- {"mysql", 1, NULL, 'm'},
- {"slaves", 1, NULL, 's'},
- {"semaphores", 1, NULL, 'l'},
- {"pidfile", 1, NULL, 'i'},
- {"proxy-il-sock", 1, NULL, 'X'},
- {"proxy-il-fprefix", 1, NULL, 'Y'},
-#ifdef LB_PERF
- {"perf-sink", 1, NULL, 'K'},
-#endif
- {NULL,0,NULL,0}
-};
-
-static const char *get_opt_string = "p:c:dm:s:l:i:X:Y:z"
-#ifdef LB_PERF
- "K:"
-#endif
-;
-
-static void usage(char *me)
-{
- fprintf(stderr,"usage: %s [option]\n"
-		"\t-p, --port\t path-name prefix for the local sockets\n"
- "\t-c, --con-queue\t size of the connection queue (accept)\n"
- "\t-m, --mysql\t database connect string\n"
- "\t-d, --debug\t don't run as daemon, additional diagnostics\n"
- "\t-s, --slaves\t number of slave servers to fork\n"
- "\t-l, --semaphores number of semaphores (job locks) to use\n"
- "\t-i, --pidfile\t file to store master pid\n"
- "\t--proxy-il-sock\t socket to send events to\n"
- "\t--proxy-il-fprefix\t file prefix for events\n"
- "\t--silent\t don't print diagnostic, even if -d is on\n"
-#ifdef LB_PERF
- "\t--perf-sink\t where to sink events\n"
-#endif
- ,me);
-}
-
-static void wait_for_open(edg_wll_Context,const char *);
-static int decrement_timeout(struct timeval *, struct timeval, struct timeval);
-
-
-
-/*
- * SERVER BONES structures and handlers
- */
-int clnt_data_init(void **);
-
- /*
- * Serve & Store handlers
- */
-int clnt_reject(int);
-int handle_conn(int, struct timeval *, void *);
-int accept_serve(int, struct timeval *, void *);
-int accept_store(int, struct timeval *, void *);
-int clnt_disconnect(int, struct timeval *, void *);
-
-#define SRV_SERVE 0
-#define SRV_STORE 1
-static struct glite_srvbones_service service_table[] = {
- { "serve", -1, handle_conn, accept_serve, clnt_reject, clnt_disconnect },
- { "store", -1, handle_conn, accept_store, clnt_reject, clnt_disconnect },
-};
-
-struct clnt_data_t {
- edg_wll_Context ctx;
- void *mysql;
-};
-
-
-
-int main(int argc, char *argv[])
-{
- int i;
- struct sockaddr_un a;
- int opt;
- char pidfile[PATH_MAX] = GLITE_LBPROXY_PIDFILE,
- socket_path_prefix[PATH_MAX] = GLITE_LBPROXY_SOCK_PREFIX,
- *name;
- FILE *fpid;
- key_t semkey;
- edg_wll_Context ctx;
- struct timeval to;
- int silent = 0;
-
-
-
- name = strrchr(argv[0],'/');
- if (name) name++; else name = argv[0];
-
- if (geteuid()) snprintf(pidfile,sizeof pidfile,"%s/glite_lb_proxy.pid", getenv("HOME"));
-
- while ((opt = getopt_long(argc, argv, get_opt_string, opts, NULL)) != EOF) switch (opt) {
- case 'p': strcpy(socket_path_prefix, optarg); break;
- case 'c': con_queue = atoi(optarg); break;
- case 'd': debug = 1; break;
- case 'z': silent = 1; break;
- case 'm': dbstring = optarg; break;
- case 's': slaves = atoi(optarg); break;
- case 'l': semaphores = atoi(optarg); break;
- case 'X': lbproxy_ilog_socket_path = strdup(optarg); break;
- case 'Y': lbproxy_ilog_file_prefix = strdup(optarg); break;
- case 'i': strcpy(pidfile, optarg); break;
-#ifdef LB_PERF
- case 'K': sink_mode = atoi(optarg); break;
-#endif
- case '?': usage(name); return 1;
- }
-
- if ( optind < argc ) { usage(name); return 1; }
-
- setlinebuf(stdout);
- setlinebuf(stderr);
-
- fpid = fopen(pidfile,"r");
- if ( fpid ) {
- int opid = -1;
-
- if ( fscanf(fpid,"%d",&opid) == 1 ) {
- if ( !kill(opid,0) ) {
- fprintf(stderr,"%s: another instance running, pid = %d\n",argv[0],opid);
- return 1;
- }
- else if (errno != ESRCH) { perror("kill()"); return 1; }
- }
- fclose(fpid);
- } else if (errno != ENOENT) { perror(pidfile); return 1; }
-
- fpid = fopen(pidfile, "w");
- if ( !fpid ) { perror(pidfile); return 1; }
- fprintf(fpid, "%d", getpid());
- fclose(fpid);
-
- semkey = ftok(pidfile,0);
-
- if ( semaphores == -1 ) semaphores = slaves;
- semset = semget(semkey, 0, 0);
- if ( semset >= 0 ) semctl(semset, 0, IPC_RMID);
- semset = semget(semkey, semaphores, IPC_CREAT | 0600);
- if ( semset < 0 ) { perror("semget()"); return 1; }
- dprintf(("Using %d semaphores, set id %d\n", semaphores, semset));
- for ( i = 0; i < semaphores; i++ ) {
- struct sembuf s;
-
- s.sem_num = i; s.sem_op = 1; s.sem_flg = 0;
- if (semop(semset,&s,1) == -1) { perror("semop()"); return 1; }
- }
-
- gethostname(host, sizeof host);
- host[sizeof host - 1] = 0;
- asprintf(&port, "%d", GLITE_WMSC_JOBID_DEFAULT_PORT);
- dprintf(("server address: %s:%s\n", host, port));
-
- service_table[SRV_SERVE].conn = socket(PF_UNIX, SOCK_STREAM, 0);
- if ( service_table[SRV_SERVE].conn < 0 ) { perror("socket()"); return 1; }
- memset(&a, 0, sizeof(a));
- a.sun_family = AF_UNIX;
- sprintf(sock_serve, "%s%s", socket_path_prefix, "serve.sock");
- strcpy(a.sun_path, sock_serve);
-
- if( connect(service_table[SRV_SERVE].conn, (struct sockaddr *)&a, sizeof(a.sun_path)) < 0) {
- if( errno == ECONNREFUSED ) {
- dprintf(("removing stale input socket %s\n", sock_serve));
- unlink(sock_serve);
- }
- } else { perror("another instance of lb-proxy is running"); return 1; }
-
- if ( bind(service_table[SRV_SERVE].conn, (struct sockaddr *) &a, sizeof(a)) < 0 ) {
- char buf[100];
-
- snprintf(buf, sizeof(buf), "bind(%s)", sock_serve);
- perror(buf);
- return 1;
- }
-
- if ( listen(service_table[SRV_SERVE].conn, con_queue) ) { perror("listen()"); return 1; }
-
- service_table[SRV_STORE].conn = socket(PF_UNIX, SOCK_STREAM, 0);
- if ( service_table[SRV_STORE].conn < 0 ) { perror("socket()"); return 1; }
- memset(&a, 0, sizeof(a));
- a.sun_family = AF_UNIX;
- sprintf(sock_store, "%s%s", socket_path_prefix, "store.sock");
- strcpy(a.sun_path, sock_store);
-
- if( connect(service_table[SRV_STORE].conn, (struct sockaddr *)&a, sizeof(a.sun_path)) < 0) {
- if( errno == ECONNREFUSED ) {
- dprintf(("removing stale input socket %s\n", sock_store));
- unlink(sock_store);
- }
- } else { perror("another instance of lb-proxy is running"); return 1; }
-
- if ( bind(service_table[SRV_STORE].conn, (struct sockaddr *) &a, sizeof(a))) {
- char buf[100];
-
- snprintf(buf, sizeof(buf), "bind(%s)", sock_store);
- perror(buf);
- return 1;
- }
- if ( listen(service_table[SRV_STORE].conn, con_queue) ) { perror("listen()"); return 1; }
-
- dprintf(("Listening at %s, %s ...\n", sock_store, sock_serve));
-
- if (!dbstring) dbstring = getenv("LBPROXYDB");
- if (!dbstring) dbstring = DEFAULTCS;
-
-
- /* Just check the database and let it be. The slaves do the job. */
- edg_wll_InitContext(&ctx);
- /* XXX: obsolete
- * edg_wll_InitContext(&ctx) used to cause segfault
- if ( !(ctx = (edg_wll_Context) malloc(sizeof(*ctx))) ) {
- perror("InitContext()");
- return -1;
- }
- memset(ctx, 0, sizeof(*ctx));
- */
- wait_for_open(ctx, dbstring);
- if (edg_wll_DBCheckVersion(ctx)) {
- char *et,*ed;
- edg_wll_Error(ctx,&et,&ed);
-
- fprintf(stderr,"%s: open database: %s (%s)\n",argv[0],et,ed);
- return 1;
- }
- edg_wll_Close(ctx);
- edg_wll_FreeContext(ctx);
-
- if ( !debug ) {
-		if ( daemon(1,0) == -1 ) { perror("daemon()"); exit(1); }
-
- fpid = fopen(pidfile,"w");
- if ( !fpid ) { perror(pidfile); return 1; }
- fprintf(fpid, "%d", getpid());
- fclose(fpid);
- openlog(name, LOG_PID, LOG_DAEMON);
- } else { setpgid(0, getpid()); }
-
- if (silent) debug = 0;
-
- glite_srvbones_set_param(GLITE_SBPARAM_SLAVES_COUNT, slaves);
- glite_srvbones_set_param(GLITE_SBPARAM_SLAVE_OVERLOAD, SLAVE_OVERLOAD);
- glite_srvbones_set_param(GLITE_SBPARAM_SLAVE_CONNS_MAX, SLAVE_CONNS_MAX);
- to = (struct timeval){REQUEST_TIMEOUT, 0};
- glite_srvbones_set_param(GLITE_SBPARAM_REQUEST_TIMEOUT, &to);
- to = (struct timeval){IDLE_TIMEOUT, 0};
- glite_srvbones_set_param(GLITE_SBPARAM_IDLE_TIMEOUT, &to);
-
- glite_srvbones_run(clnt_data_init, service_table, sizofa(service_table), debug);
-
- semctl(semset, 0, IPC_RMID, 0);
- unlink(pidfile);
- for ( i = 0; i < sizofa(service_table); i++ )
- if ( service_table[i].conn >= 0 ) close(service_table[i].conn);
- unlink(sock_serve);
- unlink(sock_store);
- if (port) free(port);
-
- return 0;
-}
-
-
-int clnt_data_init(void **data)
-{
- edg_wll_Context ctx;
- struct clnt_data_t *cdata;
-
-
- if ( !(cdata = calloc(1, sizeof(*cdata))) )
- return -1;
-
- if ( !(ctx = (edg_wll_Context) malloc(sizeof(*ctx))) ) { free(cdata); return -1; }
- memset(ctx, 0, sizeof(*ctx));
-
- dprintf(("[%d] opening database ...\n", getpid()));
- wait_for_open(ctx, dbstring);
- cdata->mysql = ctx->mysql;
- edg_wll_FreeContext(ctx);
-
-#ifdef LB_PERF
- glite_wll_perftest_init(NULL, NULL, NULL, NULL, 0);
-#endif
-
- *data = cdata;
- return 0;
-}
-
-
-int handle_conn(int conn, struct timeval *timeout, void *data)
-{
- struct clnt_data_t *cdata = (struct clnt_data_t *)data;
- edg_wll_Context ctx;
- struct timeval conn_start, now;
-
- if ( !(ctx = (edg_wll_Context) calloc(1, sizeof(*ctx))) ) {
- dprintf(("Couldn't create context"));
- return -1;
- }
- cdata->ctx = ctx;
-
- /* Shared structures (pointers)
- */
- ctx->mysql = cdata->mysql;
-
- /* set globals
- */
- ctx->allowAnonymous = 1;
- ctx->isProxy = 1;
- ctx->noAuth = 1;
- ctx->noIndex = 1;
- ctx->semset = semset;
- ctx->semaphores = semaphores;
-
- ctx->srvName = strdup(host);
- ctx->srvPort = atoi(port);
-
- ctx->connProxy = (edg_wll_ConnProxy *) calloc(1, sizeof(edg_wll_ConnProxy));
- if ( !ctx->connProxy ) {
- perror("calloc");
- edg_wll_FreeContext(ctx);
-
- return -1;
- }
-
- gettimeofday(&conn_start, 0);
- if ( edg_wll_plain_accept(conn, &ctx->connProxy->conn) ) {
- perror("accept");
- edg_wll_FreeContext(ctx);
-
- return -1;
- }
-
- gettimeofday(&now, 0);
- if ( decrement_timeout(timeout, conn_start, now) ) {
- if (debug) fprintf(stderr, "edg_wll_plain_accept() timeout");
- else syslog(LOG_ERR, "edg_wll_plain_accept(): timeout");
-
- return -1;
- }
-
-
- return 0;
-}
-
-
-int accept_store(int conn, struct timeval *timeout, void *cdata)
-{
- edg_wll_Context ctx = ((struct clnt_data_t *) cdata)->ctx;
- struct timeval before, after;
- char *errt, *errd;
- int err;
-
- memcpy(&ctx->p_tmp_timeout, timeout, sizeof(ctx->p_tmp_timeout));
- gettimeofday(&before, NULL);
- errt = errd = NULL;
- if ( edg_wll_StoreProtoProxy(ctx) ) {
- switch ( (err = edg_wll_Error(ctx, &errt, &errd)) ) {
- case ETIMEDOUT:
- case EPIPE:
- dprintf(("[%d] %s (%s)\n", getpid(), errt, errd));
- if (!debug) syslog(LOG_ERR,"%s (%s)", errt, errd);
- /* fallthrough
- */
- case ENOTCONN:
- free(errt); free(errd);
- return err;
- break;
-
- case ENOENT:
- case EINVAL:
- case EPERM:
- case EEXIST:
- case EDG_WLL_ERROR_NOINDEX:
- case E2BIG:
- dprintf(("[%d] %s (%s)\n", getpid(), errt, errd));
- if ( !debug ) syslog(LOG_ERR, "%s (%s)", errt, errd);
- break;
-
- default:
- dprintf(("[%d] %s (%s)\n", getpid(), errt, errd));
- if ( !debug ) syslog(LOG_CRIT, "%s (%s)", errt, errd);
- return -1;
- }
- free(errt); free(errd);
- } else if ( edg_wll_Error(ctx, &errt, &errd) ) {
- dprintf(("[%d] %s (%s)\n", getpid(), errt, errd));
- if ( !debug ) syslog(LOG_ERR, "%s (%s)", errt, errd);
- free(errt); free(errd);
- edg_wll_ResetError(ctx);
- }
- gettimeofday(&after, NULL);
- if ( decrement_timeout(timeout, before, after) ) {
- if (debug) fprintf(stderr, "Serving store connection timed out");
- else syslog(LOG_ERR, "Serving store connection timed out");
- return ETIMEDOUT;
- }
-
- return 0;
-}
-
-int accept_serve(int conn, struct timeval *timeout, void *cdata)
-{
- edg_wll_Context ctx = ((struct clnt_data_t *) cdata)->ctx;
- struct timeval before, after;
-
-
- /*
- * serve the request
- */
- memcpy(&ctx->p_tmp_timeout, timeout, sizeof(ctx->p_tmp_timeout));
- gettimeofday(&before, NULL);
- if ( edg_wll_ServerHTTP(ctx) ) {
- char *errt, *errd;
- int err;
-
-
- errt = errd = NULL;
- switch ( (err = edg_wll_Error(ctx, &errt, &errd)) ) {
- case ETIMEDOUT:
- case EPIPE:
- dprintf(("[%d] %s (%s)\n", getpid(), errt, errd));
- if (!debug) syslog(LOG_ERR,"%s (%s)", errt, errd);
- /* fallthrough
- */
- case ENOTCONN:
- free(errt); free(errd);
- return err;
- break;
-
- case ENOENT:
- case EINVAL:
- case EPERM:
- case EEXIST:
- case EDG_WLL_ERROR_NOINDEX:
- case E2BIG:
- dprintf(("[%d] %s (%s)\n", getpid(), errt, errd));
- if ( !debug ) syslog(LOG_ERR,"%s (%s)", errt, errd);
- /*
- * no action for non-fatal errors
- */
- break;
-
- default:
- dprintf(("[%d] %s (%s)\n", getpid(), errt, errd));
- if (!debug) syslog(LOG_CRIT,"%s (%s)",errt,errd);
- /*
- * unknown error - do rather return (<0) (slave will be killed)
- */
- return -1;
- }
- free(errt); free(errd);
- }
- gettimeofday(&after, NULL);
- if ( decrement_timeout(timeout, before, after) ) {
-		if (debug) fprintf(stderr, "Serving serve connection timed out");
-		else syslog(LOG_ERR, "Serving serve connection timed out");
- return ETIMEDOUT;
- }
-
- return 0;
-}
-
-
-int clnt_disconnect(int conn, struct timeval *timeout, void *cdata)
-{
- edg_wll_Context ctx = ((struct clnt_data_t *) cdata)->ctx;
-
- /* XXX: handle the timeout
- */
- if ( ctx->connProxy && ctx->connProxy->conn.sock >= 0 )
- edg_wll_plain_close(&ctx->connProxy->conn);
-
- edg_wll_FreeContext(ctx);
- ctx = NULL;
-
- return 0;
-}
-
-int clnt_reject(int conn)
-{
- return 0;
-}
-
-static void wait_for_open(edg_wll_Context ctx, const char *dbstring)
-{
- char *dbfail_string1, *dbfail_string2;
-
- dbfail_string1 = dbfail_string2 = NULL;
-
- while (edg_wll_Open(ctx, (char *) dbstring)) {
- char *errt,*errd;
-
- if (dbfail_string1) free(dbfail_string1);
- edg_wll_Error(ctx,&errt,&errd);
- asprintf(&dbfail_string1,"%s (%s)\n",errt,errd);
- if (dbfail_string1 != NULL) {
- if (dbfail_string2 == NULL || strcmp(dbfail_string1,dbfail_string2)) {
- if (dbfail_string2) free(dbfail_string2);
- dbfail_string2 = dbfail_string1;
- dbfail_string1 = NULL;
- dprintf(("[%d]: %s\nStill trying ...\n",getpid(),dbfail_string2));
-				if (!debug) syslog(LOG_ERR, "%s", dbfail_string2);
- }
- }
- sleep(5);
- }
-
- if (dbfail_string1) free(dbfail_string1);
- if (dbfail_string2 != NULL) {
- free(dbfail_string2);
- dprintf(("[%d]: DB connection established\n",getpid()));
- if (!debug) syslog(LOG_INFO,"DB connection established\n");
- }
-}
-
-static int decrement_timeout(struct timeval *timeout, struct timeval before, struct timeval after)
-{
- (*timeout).tv_sec = (*timeout).tv_sec - (after.tv_sec - before.tv_sec);
- (*timeout).tv_usec = (*timeout).tv_usec - (after.tv_usec - before.tv_usec);
- while ( (*timeout).tv_usec < 0) {
- (*timeout).tv_sec--;
- (*timeout).tv_usec += 1000000;
- }
- if ( ((*timeout).tv_sec < 0) || (((*timeout).tv_sec == 0) && ((*timeout).tv_usec == 0)) ) return(1);
- else return(0);
-}
-
+++ /dev/null
-#!/bin/bash
-
-numjobs=$1
-
-# XXX - there must be a better way to find stage
-STAGEDIR=/home/michal/shared/egee/jra1-head/stage
-. $STAGEDIR/sbin/perftest_common.sh
-
-DEBUG=${DEBUG:-0}
-PERFTEST_CONSUMER=./glite_lb_proxy_perf
-# CONSUMER_ARGS=
-# PERFTEST_COMPONENT=
-# COMPONENT_ARGS=
-#LOGJOBS_ARGS=""
-
-check_test_files || exit 1
-
-echo -e "\tsmall_job \t big_job \t small_dag \t big_dag"
-i=1
-while [[ $i -lt 5 ]]
-do
- CONSUMER_ARGS="-d --perf-sink $i"
- echo Running test $i
- run_test proxy $numjobs
- j=0
- while [[ $j -lt 4 ]]
- do
- echo -e -n "\t ${PERFTEST_THROUGHPUT[$j]}"
- j=$((j+1))
- done
- echo ""
- # purge jobs from database
- $LOGJOBS -n $numjobs > /tmp/perftest.jobids
- i=$((i+1))
-done
-
+++ /dev/null
-.project
-.cdtproject
\ No newline at end of file
+++ /dev/null
-LICENSE file for EGEE Middleware
-================================
-
-Copyright (c) 2004 on behalf of the EU EGEE Project:
-The European Organization for Nuclear Research (CERN),
-Istituto Nazionale di Fisica Nucleare (INFN), Italy
-Datamat Spa, Italy
-Centre National de la Recherche Scientifique (CNRS), France
-CS Systeme d'Information (CSSI), France
-Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
-Universiteit van Amsterdam (UvA), Netherlands
-University of Helsinki (UH.HIP), Finland
-University of Bergen (UiB), Norway
-Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-
-3. The end-user documentation included with the redistribution, if
-any, must include the following acknowledgment: "This product includes
-software developed by The EU EGEE Project (http://cern.ch/eu-egee/)."
-Alternatively, this acknowledgment may appear in the software itself, if
-and wherever such third-party acknowledgments normally appear.
-
-4. The names EGEE and the EU EGEE Project must not be
-used to endorse or promote products derived from this software without
-prior written permission. For written permission, please contact
-<email address>.
-
-5. You are under no obligation whatsoever to provide anyone with any
-bug fixes, patches, or upgrades to the features, functionality or
-performance of the Software ("Enhancements") that you may develop over
-time; however, if you choose to provide your Enhancements to The EU
-EGEE Project, or if you choose to otherwise publish or distribute your
-Enhancements, in source code form without contemporaneously requiring
-end users of The EU EGEE Project to enter into a separate written license
-agreement for such Enhancements, then you hereby grant The EU EGEE Project
-a non-exclusive, royalty-free perpetual license to install, use, copy,
-modify, prepare derivative works, incorporate into the EGEE Middleware
-or any other computer software, distribute, and sublicense your
-Enhancements or derivative works thereof, in binary and source code
-form (if any), whether developed by The EU EGEE Project or third parties.
-
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL PROJECT OR ITS CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-This software consists of voluntary contributions made by many
-individuals on behalf of the EU EGEE Project. For more information on The
-EU EGEE Project, please see http://cern.ch/eu-egee/. For more information on
-EGEE Middleware, please see http://egee-jra1.web.cern.ch/egee-jra1/
-
-
+++ /dev/null
-# defaults
-top_srcdir=.
-builddir=build
-top_builddir=${top_srcdir}/${builddir}
-stagedir=.
-distdir=.
-globalprefix=glite
-lbprefix=lb
-package=glite-lb-utils
-version=0.0.0
-PREFIX=/opt/glite
-
-glite_location=/opt/glite
-globus_prefix=/opt/globus
-nothrflavour=gcc32
-thrflavour=gcc32pthr
-expat_prefix=/opt/expat
-gsoap_prefix=/opt/gsoap
-
--include Makefile.inc
--include ../project/version.properties
-
-version=${module.version}
-
-VPATH=${top_srcdir}/src:${top_srcdir}/doc
-
-CC=gcc
-DEBUG:=-g -O0 -Wall
-CFLAGS:= \
- ${WS_CFLAGS} ${DEBUG} \
- -DVERSION=\"${version}\" \
- -I${stagedir}/include -I${top_srcdir}/src -I. \
- -I${top_srcdir}/interface \
- -I${expat_prefix}/include \
- -I${gsoap_prefix}/include -I${gsoap_prefix}/ \
- ${COVERAGE_FLAGS} \
- -I${mysql_prefix}/include -I${mysql_prefix}/include/mysql \
- -I${globus_prefix}/include/${nothrflavour} \
- $(GRIDSITE_CFLAGS) \
- -D_GNU_SOURCE
-LDFLAGS:=-L${stagedir}/lib
-
-COMPILE:=libtool --mode=compile ${CC} ${CFLAGS}
-LINK:=libtool --mode=link ${CC} -rpath ${stagedir}/lib ${LDFLAGS}
-SOLINK:=libtool --mode=link ${CC} -module ${LDFLAGS} -rpath ${stagedir}/lib
-LINKXX:=libtool --mode=link ${CXX} ${LDFLAGS}
-INSTALL:=libtool --mode=install install
-LINKXX:=libtool --mode=link ${CXX} -rpath ${stagedir}/lib ${LDFLAGS}
-XSLTPROC:=xsltproc
-
-GLOBUS_LIBS:= -L${globus_prefix}/lib \
- -lglobus_common_${nothrflavour} \
- -lglobus_gssapi_gsi_${nothrflavour} \
-
-ifneq (${mysql_prefix},/usr)
- ifeq ($(shell echo ${mysql_version} | cut -d. -f1,2),4.1)
- mysqlib := -L${mysql_prefix}/lib/mysql
- else
- mysqlib := -L${mysql_prefix}/lib
- endif
-endif
-
-ifneq (${expat_prefix},/usr)
- expatlib := -L${expat_prefix}/lib
-endif
-
-ifneq (${classads_prefix},/usr)
- classadslib := -L${classads_prefix}/lib -lclassad
-endif
-
-EXT_LIBS:= \
- ${mysqlib} -lmysqlclient -lz\
- ${expatlib} -lexpat \
- ${GRIDSITE_LIBS} \
- ${GLOBUS_LIBS}
-COMMON_LIB:=-lglite_lb_common_${nothrflavour}
-CLIENT_LIB:=-lglite_lb_client_${nothrflavour}
-BKSERVER_LIB:=-lglite_lb_bkserver
-JP_LIBS:=-lglite_jp_common -lglite_jp_trio
-
-UTILS:=statistics mon mon-db
-
-MAN_GZ:=glite-lb-mon.1.gz glite-lb-mon-db.1.gz
-MAN = $(MAN_GZ:.gz=)
-
-
-default: all
-
-compile all: ${UTILS} ${MAN_GZ}
-
-%.o: %.c
- ${CC} ${CFLAGS} ${GLOBUSINC} -c $<
-
-mon: mon.o
- ${LINK} -o $@ $< ${COMMON_LIB} ${CLIENT_LIB} ${EXT_LIBS}
-
-mon-db: mon-db.o
- ${LINK} -o $@ $< ${COMMON_LIB} ${BKSERVER_LIB} ${EXT_LIBS}
-
-statistics: statistics.o
- ${LINK} -rdynamic -o $@ $< ${JP_LIBS} ${EXT_LIBS}
-#${classadslib}
-
-check: compile
-
-doc: ${MAN_GZ}
-
-${MAN_GZ}: ${MAN}
- cp $? .
- gzip -f $(notdir $?)
-
-stage: compile
- $(MAKE) install PREFIX=${stagedir} DOSTAGE=yes
-
-dist: distsrc distbin
-
-distsrc:
- mkdir -p ${top_srcdir}/${package}-${version}
- cd ${top_srcdir} && GLOBIGNORE="${package}-${version}" && cp -Rf * ${package}-${version}
- cd ${top_srcdir} && tar -czf ${distdir}/${package}-${version}_src.tar.gz --exclude-from=project/tar_exclude ${package}-${version}
- rm -rf ${top_srcdir}/${package}-${version}
-
-distbin:
- $(MAKE) install PREFIX=`pwd`/tmpbuilddir${stagedir}
- save_dir=`pwd`; cd tmpbuilddir${stagedir} && tar -czf $$save_dir/${top_srcdir}/${distdir}/${package}-${version}_bin.tar.gz *; cd $$save_dir
- rm -rf tmpbuilddir
-
-install:
- -mkdir -p ${PREFIX}/bin
- -mkdir -p ${PREFIX}/lib
- -mkdir -p ${PREFIX}/share/doc/${package}-${version}
- -mkdir -p ${PREFIX}/share/man/man1
- ${INSTALL} -m 644 ${top_srcdir}/LICENSE ${PREFIX}/share/doc/${package}-${version}
- ${INSTALL} -m 644 ${top_srcdir}/doc/README* ${PREFIX}/share/doc/${package}-${version}
- ${INSTALL} -m 644 ${MAN_GZ} ${PREFIX}/share/man/man1
-
- for p in ${UTILS} ; do \
- ${INSTALL} -m 755 "$$p" "${PREFIX}/bin/glite-lb-$$p"; \
- done
-
- if [ x${DOSTAGE} != xyes ]; then \
- ${INSTALL} -m 755 ${stagedir}/lib/glite_lb_plugin.so ${PREFIX}/lib; \
- fi
-
-clean:
- rm -fv ${UTILS} ${MAN_GZ} *.{lo,o}
-
-%.o: %.c
- ${COMPILE} -c $<
-
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Build file for the GLite lb utils component
-
- Authors: Jan Pospisil <honik@kma.zcu.cz>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
- Revision 1.2 2005/10/15 00:59:34 jpospi
- Added LB Statistics
-
- Revision 1.1.1.1 2005/09/30 15:17:04 jpospi
- New org.glite.lb.utils component
-
-
--->
-
-<project name="utils" default="dist">
-
- <!-- ==============================================
- Builds the GLite lb utils component
- ============================================== -->
-
- <!-- =========================================
- Import properties (order is important)
- ========================================= -->
-
- <!-- Import baseline properties & user properties -->
- <import file="../org.glite/project/baseline.properties.xml" />
-
- <!-- import component build properties,
- component properties &
- component common properties -->
- <import file="./project/properties.xml"/>
-
- <!-- import subsystem build properties,
- subsystem properties &
- subsystem common properties -->
- <import file="${subsystem.properties.file}"/>
-
- <!-- import global build properties &
- global properties -->
- <import file="${global.properties.file}" />
-
- <!-- =========================================
- Load dependency property files (order is important)
- ========================================= -->
- <property file="${user.dependencies.file}"/>
- <property file="${component.dependencies.file}" />
- <property file="${subsystem.dependencies.file}" />
- <property file="${global.dependencies.file}"/>
-
- <!-- =========================================
- Load configuration definitions (order is important)
- ========================================= -->
- <import file="${global.configure.options.file}"/>
- <import file="${component.configure.options.file}"/>
-
- <!-- =========================================
- Import task definitions (order is important)
- ========================================= -->
- <import file="${subsystem.taskdefs.file}" />
- <import file="${global.taskdefs.file}" />
-
- <!-- =========================================
- Load common targets
- ========================================= -->
- <!-- Put your language target (java/c++-ant/c++-autotool/perl) here -->
- <import file="${global.targets-simple_make.file}" />
-
- <!-- =========================================
- Load version file
- ========================================= -->
- <property file="${module.version.file}"/>
- <property file="${module.build.file}"/>
-
- <!-- ==============================================
- Local private targets
- ============================================== -->
-
- <target name="localinit"
- description="Module specific initialization tasks">
- <antcall target="lbmakefiles" />
- </target>
-
- <target name="localcompile"
- description="Module specific compile tasks">
- </target>
-
- <target name="localclean"
- description="Module specific cleaning tasks">
- </target>
-
- <!-- =========================================
- RPM settings
- ========================================= -->
-
-	<property name="build.package.summary" value="L&amp;B utils" />
-	<property name="build.package.description" value="L&amp;B Statistics and L&amp;B Monitoring utilities." />
-
-</project>
+++ /dev/null
-LB monitoring tools
-===================
-
-GLITE-LB-MON
-------------
-NAME
-    glite-lb-mon - program for monitoring the number of jobs on the LB server and several related statistics
-
-
-SYNOPSIS
- glite-lb-mon [-t time]
-
-DESCRIPTION
-    glite-lb-mon is a low-level program for monitoring the number of jobs on the LB server and several related statistics.
-    The minimum, average and maximum time spent in the system are calculated for jobs that entered a final state
-    (Aborted, Cleared, Cancelled) within a given time window (by default the last hour). The number of jobs that entered
-    the system during this window is also calculated.
-
-OPTIONS
- -t time, --time=time
-        querying time in seconds from now to the past [default 3600]
-
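-EXAMPLES
-    An illustrative invocation; "lb.example.org:9000" is only a placeholder for a real LB server address,
-    passed through the EDG_WL_QUERY_SERVER variable described under ENVIRONMENT:
-
-    EDG_WL_QUERY_SERVER=lb.example.org:9000 glite-lb-mon -t 86400
-        report jobs that entered a final state during the last 24 hours
-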
-ENVIRONMENT
- EDG_WL_QUERY_SERVER
- set this environment variable to specify the LB server address to query
-
-FILES
-    A special bkindex configuration is needed. The following time indices must be defined:
-
- [ type = "time"; name = "submitted" ],
- [ type = "time"; name = "cleared" ],
- [ type = "time"; name = "aborted" ],
- [ type = "time"; name = "cancelled" ],
-
-
-GLITE-LB-MON-DB
----------------
-NAME
- glite-lb-mon-db - program for monitoring the number of jobs in the LB system
-
-SYNOPSIS
- glite-lb-mon-db [-m dbstring]
-
-DESCRIPTION
-    glite-lb-mon-db is a low-level program for monitoring the number of jobs in the LB system. Using the LB internals,
- it connects directly to the underlying MySQL database and reads the number of jobs in each state.
-
-OPTIONS
- -m dbstring, --mysql=dbstring
- use non-default database connection string
-
-EXAMPLES
- glite-lb-mon-db
- this is the default usage
-
- glite-lb-mon-db -m lbserver/@localhost:lbproxy
- use this dbstring to query the LB Proxy database. WARNING: the data in the LB Proxy may be incomplete!
-
-ENVIRONMENT
- MYSQL_UNIX_PORT
- set this environment variable to specify the path to the non-default MySQL socket path
-
- LBDB you can set this environment variable as an alternative to specify the non-default database connection string
-
-
+++ /dev/null
-LB statistics tools
-===================
-
-GLITE-LB-STATISTICS
--------------------
+++ /dev/null
-.TH GLITE-LB-MON-DB 1 "Mar 2006" "EU EGEE Project" "Logging & Bookkeeping Utils"
-
-.SH NAME
-glite-lb-mon-db - program for monitoring the number of jobs in the LB system
-
-.SH SYNOPSIS
-.B glite-lb-mon-db
-.B [-m dbstring]
-.br
-
-.SH DESCRIPTION
-.B glite-lb-mon-db
-is a low-level program for monitoring the number of jobs in the LB system.
-Using the LB internals, it connects directly to the underlying MySQL database and reads
-the number of jobs in each state.
-
-.SH OPTIONS
-.TP
-.B \-m dbstring, \-\-mysql=dbstring
-use non-default database connection string
-
-.SH EXAMPLES
-.TP
-.BI glite-lb-mon-db
-this is the default usage
-.TP
-.BI glite-lb-mon-db \ -m \ lbserver/@localhost:lbproxy
-use this dbstring to query the LB Proxy database. WARNING: the data in the LB Proxy may be incomplete!
-
-.SH ENVIRONMENT
-.TP
-.B MYSQL_UNIX_PORT
-set this environment variable to specify the path to the non-default MySQL socket path
-.TP
-.B LBDB
-you can set this environment variable as an alternative to specify the non-default database connection string
-
-.SH REPORTING BUGS
-Please, report all bugs to EU EGEE Bug Tracking System located at https://savannah.cern.ch/bugs/?func=additem&group=jra1mdw
-
-.SH SEE ALSO
-.BR glite-lb-mon (1)
-
-.SH AUTHOR
-EU EGEE JRA1, CESNET group.
+++ /dev/null
-.TH GLITE-LB-MON 1 "Mar 2006" "EU EGEE Project" "Logging & Bookkeeping Utils"
-
-.SH NAME
-glite-lb-mon - program for monitoring the number of jobs on the LB server and several related statistics
-
-.SH SYNOPSIS
-.B glite-lb-mon
-.B [-t time]
-.br
-
-.SH DESCRIPTION
-.B glite-lb-mon
-is a low-level program for monitoring the number of jobs on the LB server and several related statistics.
-The minimum, average and maximum time spent in the system are calculated for jobs that entered
-a final state (Aborted, Cleared, Cancelled) within a given time window (by default the last hour). The number
-of jobs that entered the system during this window is also calculated.
-
-.SH OPTIONS
-.TP
-.B \-t time, \-\-time=time
-querying time in seconds from now to the past [default 3600]
-
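-.SH EXAMPLES
-An illustrative invocation (not an exhaustive reference); the LB server to query is taken from
-the EDG_WL_QUERY_SERVER environment variable (see ENVIRONMENT below):
-.TP
-.BI glite-lb-mon \ -t \ 86400
-report jobs that entered a final state during the last 24 hours
-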
-.SH ENVIRONMENT
-.TP
-.B EDG_WL_QUERY_SERVER
-set this environment variable to specify the LB server address to query
-
-.SH FILES
-A special bkindex configuration is needed. The following time indices must be defined:
-
-.nf
- [ type = "time"; name = "submitted" ],
- [ type = "time"; name = "cleared" ],
- [ type = "time"; name = "aborted" ],
- [ type = "time"; name = "cancelled" ],
-.fi
-
-
-.SH REPORTING BUGS
-Please, report all bugs to EU EGEE Bug Tracking System located at https://savannah.cern.ch/bugs/?func=additem&group=jra1mdw
-
-.SH SEE ALSO
-.BR glite-lb-bkindex (8),
-.BR glite-lb-mon (1)
-
-.SH AUTHOR
-EU EGEE JRA1, CESNET group.
+++ /dev/null
-[
- JobIndices = {
- [ type = "system"; name = "owner" ],
- [ type = "system"; name = "location" ],
- [ type = "system"; name = "destination" ],
- [ type = "time"; name = "submitted" ],
- [ type = "time"; name = "cleared" ],
- [ type = "time"; name = "aborted" ],
- [ type = "time"; name = "cancelled" ],
- }
-]
+++ /dev/null
-#Mon Apr 03 07:49:52 CEST 2006
-module.build=0089
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Configuration build properties file for the GLite lb utils component
-
- Authors: Jan Pospisil <honik@kma.zcu.cz>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
- Revision 1.2 2006/03/15 18:32:35 akrenek
- cares
-
- Revision 1.1.1.1 2005/09/30 15:17:04 jpospi
- New org.glite.lb.utils component
-
-
--->
-<project name="LB Utils configuration properties">
- <target name="lbmakefiles">
- <exec executable="ln" failonerror="true">
- <arg line="-fs ${component.dir}/Makefile ${module.build.dir}/Makefile"/>
- </exec>
- <echo file="${module.build.dir}/Makefile.inc">
-top_srcdir=..
-builddir=build
-stagedir=${stage.abs.dir}
-distdir=${dist.dir}
-globalprefix=${global.prefix}
-lbprefix=${subsystem.prefix}
-package=${module.package.name}
-PREFIX=${install.dir}
-version=${module.version}
-glite_location=${with.glite.location}
-globus_prefix=${with.globus.prefix}
-expat_prefix=${with.expat.prefix}
-mysql_prefix=${with.mysql.prefix}
-mysql_version=${ext.mysql.version}
-gsoap_prefix=${with.gsoap.prefix}
-gsoap_version=${ext.gsoap.version}
-cppunit_prefix=${with.cppunit.prefix}
-thrflavour=${with.globus.thr.flavor}
-nothrflavour=${with.globus.nothr.flavor}
- </echo>
- </target>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Common build properties file for the GLite lb utils component
-
- Authors: Jan Pospisil <honik@kma.zcu.cz>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
-
--->
-
-<project name="lb utils component common properties">
-
- <!-- Include build properties to allow overwriting
- of properties for subsystem -->
- <property file="project/build.properties" />
-
- <!-- ======================================================
- Define corresponding subsystem properties
- ====================================================== -->
-
- <!-- Subsystem name -->
- <property name="subsystem.name" value="${lb.subsystem.name}"/>
-
- <!-- Subsystem prefix -->
- <property name="subsystem.prefix" value="${lb.subsystem.prefix}"/>
-
- <!-- ======================================================
- Define component properties
- ====================================================== -->
-
- <!-- Component name prefix -->
- <property name="component.prefix" value="utils" />
-
- <!-- ======================================================
- Define general component properties
- ====================================================== -->
-
- <import file="${component.general.properties.file}" />
-
- <!-- ======================================================
- Define extra properties here ...
- ====================================================== -->
-
-
-</project>
+++ /dev/null
-tar_exclude
-CVS
-build.xml
-build
-build.properties
-properties.xml
-configure-options.xml
-.cvsignore
-.project
-.cdtproject
+++ /dev/null
-module.version = 1.1.0
-module.age = 0
+++ /dev/null
-#ident "$Header$"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <getopt.h>
-#include <sysexits.h>
-#include <assert.h>
-
-#include "glite/wmsutils/jobid/strmd5.h"
-#include "glite/lb/consumer.h"
-#include "glite/lb/context-int.h"
-#include "glite/lb/lbs_db.h"
-#include "glite/lb/jobstat.h"
-
-static struct option opts[] = {
- { "mysql",1,NULL,'m' },
- { "verbose",0,NULL,'v' },
- { NULL, 0, NULL, 0 }
-};
-
-static void usage();
-static void do_exit(edg_wll_Context,int);
-static const char *me;
-
-int main(int argc,char **argv)
-{
- int opt;
- char *dbstring = getenv("LBDB");
- int verbose = 0, rows = 0, fields = 0, jobs = 0, i;
- edg_wll_Context ctx;
- char *stmt = NULL, *status = NULL;
- char *str[2];
- edg_wll_Stmt sh;
-
- me = strdup(argv[0]);
-
- while ((opt = getopt_long(argc,argv,"m:v",opts,NULL)) != EOF) switch (opt) {
- case 'm': dbstring = optarg; break;
- case 'v': verbose++; break;
- case '?': usage(); exit(EX_USAGE);
- }
-
- edg_wll_InitContext(&ctx);
- if (edg_wll_Open(ctx,dbstring)) do_exit(ctx,EX_UNAVAILABLE);
- if (edg_wll_DBCheckVersion(ctx)) do_exit(ctx,EX_SOFTWARE);
- if (asprintf(&stmt,"SELECT status,count(status) FROM states GROUP BY status;") < 0) do_exit(ctx,EX_OSERR);
- if (verbose) fprintf(stderr,"mysql query: %s\n",stmt);
- if ((rows = edg_wll_ExecStmt(ctx,stmt,&sh)) < 0) do_exit(ctx,EX_SOFTWARE);
- if (verbose) fprintf(stderr,"number of states returned: %d\n",rows);
- if (rows > 0) fprintf(stdout,"Number of jobs in each state: \n");
- for (i = 0; i < rows; i++) {
- fields = edg_wll_FetchRow(sh, str);
- if (fields != 2) {
- edg_wll_FreeStmt(&sh);
- do_exit(ctx,EX_SOFTWARE);
- }
- status = edg_wll_StatToString((edg_wll_JobStatCode) atoi(str[0]));
- jobs += atoi(str[1]);
- fprintf(stdout,"%s: %s\n",status,str[1]);
- if (str[0]) free(str[0]);
- if (str[1]) free(str[1]);
- if (status) free(status);
- }
- fprintf(stdout,"Total number of jobs: %d\n",jobs);
-
- if (stmt) free(stmt);
- edg_wll_FreeStmt(&sh);
- edg_wll_FreeContext(ctx);
-
- return 0;
-}
-
-static void do_exit(edg_wll_Context ctx,int code)
-{
- char *et,*ed;
-
- edg_wll_Error(ctx,&et,&ed);
- fprintf(stderr,"%s: %s (%s)\n",me,et,ed);
- exit(code);
-}
-
-static void usage()
-{
- fprintf(stderr,"usage: %s <options>\n"
- " -m,--mysql <dbstring> use non-default database connection\n"
- " -v,--verbose be verbose\n",
- me);
-}
+++ /dev/null
-#ident "$Header$"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <limits.h>
-#include <string.h>
-#include <errno.h>
-#include <getopt.h>
-#include <time.h>
-
-#include "glite/lb/consumer.h"
-
-#define DEFAULT_QUERY_TIME 3600
-
-static void usage(char *);
-static int query_all(edg_wll_Context, int, struct timeval, edg_wll_JobStat **);
-static void dgerr(edg_wll_Context,char *);
-
-static char *myname = NULL;
-static int debug = 0, verbose = 0, seconds = DEFAULT_QUERY_TIME, lbproxy = 0;
-static const char rcsid[] = "@(#)$Id$";
-
-static struct option const long_options[] = {
- { "help", no_argument, 0, 'h' },
- { "version", no_argument, 0, 'V' },
- { "verbose", no_argument, 0, 'v' },
- { "debug", no_argument, 0, 'd' },
- { "time", required_argument, 0, 't' },
-	{ "lbproxy",		no_argument,		0,	'x' },
- { NULL, 0, NULL, 0}
-};
-
-int main(int argc,char *argv[]) {
- edg_wll_Context ctx;
- edg_wll_JobStat *statesOut = NULL;
- struct timeval time_now;
- int state[4] = { EDG_WLL_JOB_CLEARED, EDG_WLL_JOB_ABORTED, EDG_WLL_JOB_CANCELLED, EDG_WLL_JOB_SUBMITTED };
-
- int i, j, result, opt;
- result = opt = 0;
-
- myname = argv[0];
- fprintf(stdout,"\n");
- /* get arguments */
- while ((opt = getopt_long(argc,argv,
- "h" /* help */
- "V" /* version */
- "v" /* verbose */
- "d" /* debug */
- "t:" /* time [in seconds] */
- "x", /* lbproxy */
- long_options, (int *) 0)) != EOF) {
-
- switch (opt) {
- case 'V': fprintf(stdout,"%s:\t%s\n",argv[0],rcsid); exit(0);
- case 'v': verbose = 1; break;
- case 'd': debug = 1; break;
- case 'x': lbproxy = 1; break;
- case 't': seconds = atoi(optarg); break;
- case 'h':
- default:
- usage(argv[0]); exit(0);
- }
- }
- gettimeofday(&time_now,0);
-
- if ( edg_wll_InitContext(&ctx) ) {
- fprintf(stderr,"%s: cannot initialize edg_wll_Context\n ",myname);
- exit(1);
- }
-
- for ( j = 0; j < sizeof(state)/sizeof(state[0]); j++) {
- char *status = edg_wll_StatToString(state[j]);
- int min,avg,max,nJobs;
-
- avg = max = nJobs = 0;
- min = INT_MAX;
-
- fprintf(stdout,"Jobs that entered state %s in the last %d seconds: \n",status,seconds);
-
- if ( (result = query_all(ctx, state[j], time_now, &statesOut)) ) {
- dgerr(ctx, "edg_wll_QueryJobs");
- } else {
- if ( statesOut ) {
- for (i=0; statesOut[i].state; i++) {
-				int val = statesOut[i].stateEnterTime.tv_sec
-					- statesOut[i].stateEnterTimes[1+EDG_WLL_JOB_SUBMITTED];
-
- avg += val;
- if (val < min) min = val;
- if (val > max) max = val;
-
- edg_wll_FreeStatus(&statesOut[i]);
- }
- nJobs = i;
- free(statesOut);
- }
- if (nJobs > 0) avg = avg / nJobs;
- if (min == INT_MAX) min = 0;
-
- fprintf(stdout,"number of jobs: %d\n",nJobs);
- if (state[j] != EDG_WLL_JOB_SUBMITTED) {
- fprintf(stdout,"minimum time spent in the system: %d seconds\n",min);
- fprintf(stdout,"average time spent in the system: %d seconds\n",avg);
- fprintf(stdout,"maximum time spent in the system: %d seconds\n",max);
- }
- fprintf(stdout,"\n\n");
- }
-
- if (status) free(status);
-
- }
- edg_wll_FreeContext(ctx);
-
-
- return result;
-}
-
-static void
-usage(char *name) {
- fprintf(stdout, "Usage: %s [-x] [-t time]\n"
- "-h, --help display this help and exit\n"
-		"-t, --time querying time in seconds from now to the past [default %d]\n",
- name, DEFAULT_QUERY_TIME);
-}
-
-static int
-query_all(edg_wll_Context ctx, int query_status, struct timeval query_time, edg_wll_JobStat **statesOut) {
- edg_wll_QueryRec jc[3];
- int ret;
-
- memset(jc, 0, sizeof jc);
-
- /* jobs in the state 'query_status' within last hour */
- jc[0].attr = EDG_WLL_QUERY_ATTR_STATUS;
- jc[0].op = EDG_WLL_QUERY_OP_EQUAL;
- jc[0].value.i = query_status;
- jc[1].attr = EDG_WLL_QUERY_ATTR_TIME;
- jc[1].attr_id.state = query_status;
- jc[1].op = EDG_WLL_QUERY_OP_WITHIN;
- jc[1].value.t.tv_sec = query_time.tv_sec - seconds;
- jc[1].value.t.tv_usec = query_time.tv_usec;
- jc[1].value2.t.tv_sec = query_time.tv_sec;
- jc[1].value2.t.tv_usec = query_time.tv_usec;
- jc[2].attr = EDG_WLL_QUERY_ATTR_UNDEF;
-
- if ( (ret = edg_wll_QueryJobs(ctx, jc, 0, NULL, statesOut)) ) {
- if ( ret == E2BIG ) {
- int r;
- if ( edg_wll_GetParam(ctx, EDG_WLL_PARAM_QUERY_RESULTS, &r) ) return ret;
- if ( r != EDG_WLL_QUERYRES_LIMITED ) return ret;
-
- fprintf(stderr," edg_wll_QueryJobs() Warning: only limited result returned!\n");
- return 0;
- } else return ret;
- }
-
- return ret;
-}
-
-static void
-dgerr(edg_wll_Context ctx,char *where) {
- char *etxt,*edsc;
-
- edg_wll_Error(ctx,&etxt,&edsc);
- fprintf(stderr,"%s: %s: %s",myname,where,etxt);
- if (edsc) fprintf(stderr," (%s)",edsc);
- putc('\n',stderr);
- if(etxt) free(etxt);
- if(edsc) free(edsc);
-}
+++ /dev/null
-/*
- * load and test L&B plugin
- *
- * (requires -rdynamic to use fake JP backend symbols)
- */
-
-#include <string.h>
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-#include <dlfcn.h>
-#include <malloc.h>
-#include <unistd.h>
-#include <getopt.h>
-
-#include "glite/jp/types.h"
-#include "glite/jp/context.h"
-#include "glite/jp/backend.h"
-#include "glite/jp/file_plugin.h"
-#include "glite/jp/known_attr.h"
-#include "glite/jp/attr.h"
-#include "glite/lb/jp_job_attrs.h"
-
-
-typedef int init_f(glite_jp_context_t ctx, glite_jpps_fplug_data_t *data);
-typedef void done_f(glite_jp_context_t ctx, glite_jpps_fplug_data_t *data);
-
-static const char rcsid[] = "@(#)$$";
-static int verbose = 0;
-static char *file = NULL;
-
-static struct option const long_options[] = {
- { "file", required_argument, 0, 'f' },
- { "help", no_argument, 0, 'h' },
- { "verbose", no_argument, 0, 'v' },
- { "version", no_argument, 0, 'V' },
- { NULL, 0, NULL, 0}
-};
-
-/*
- * usage
- */
-static void
-usage(char *program_name) {
- fprintf(stdout,"LB statistics\n"
- "- reads a dump file (one job only) \n"
- "- and outputs an XML with statistics to stdout \n\n"
- "Usage: %s [option]\n"
- "-h, --help display this help and exit\n"
- "-V, --version output version information and exit\n"
- "-v, --verbose print extensive debug output to stderr\n"
- "-f, --file <file> dump file to process\n\n",
- program_name);
-}
-
-/*
- * substitute implementation of JP backend
- */
-
-int glite_jppsbe_pread(glite_jp_context_t ctx, void *handle, void *buf, size_t nbytes, off_t offset, ssize_t *nbytes_ret) {
- FILE *f;
-
- f = (FILE *)handle;
- if (fseek(f, offset, SEEK_SET) != 0) {
- *nbytes_ret = 0;
- return 0;
- }
- *nbytes_ret = fread(buf, 1, nbytes, f);
-
- return ferror(f) ? 1 : 0;
-}
-
-
-int glite_jp_stack_error(glite_jp_context_t ctx, const glite_jp_error_t *jperror) {
- fprintf(stderr,"lb_statistics: JP backend error %d: %s\n", jperror->code, jperror->desc);
- return 0;
-}
-
-
-int glite_jp_clear_error(glite_jp_context_t ctx) {
- return 0;
-}
-
-
-/*
- * free the array of JP attr
- */
-static void free_attrs(glite_jp_attrval_t *av) {
- glite_jp_attrval_t *item;
-
- item = av;
- while (item->name) {
- glite_jp_attrval_free(item++, 0);
- }
- free(av);
-}
-
-/*
- * main
- */
-int main(int argc, char *argv[])
-{
- glite_jp_context_t jpctx;
- glite_jpps_fplug_data_t plugin_data;
- void *data_handle = NULL, *lib_handle = NULL;
- FILE *f;
- glite_jp_attrval_t *attrval;
- char *err;
- init_f *plugin_init;
- done_f *plugin_done;
- int opt;
-
- /* get arguments */
- while ((opt = getopt_long(argc,argv,
- "f:" /* file */
- "h" /* help */
- "v" /* verbose */
- "V", /* version */
- long_options, (int *) 0)) != EOF) {
-
- switch (opt) {
- case 'V': fprintf(stdout,"%s:\t%s\n",argv[0],rcsid); return(0);
- case 'v': verbose = 1; break;
- case 'f': file = optarg; break;
- case 'h':
- default:
- usage(argv[0]); return(0);
- }
- }
-
- /* load L&B plugin and its 'init' symbol */
- if ((lib_handle = dlopen("glite_lb_plugin.so", RTLD_LAZY)) == NULL) {
- err = dlerror() ? :"unknown error";
- fprintf(stderr,"lb_statistics: can't load L&B plugin (%s)\n", err);
- return 1;
- }
- if ((plugin_init = dlsym(lib_handle, "init")) == NULL ||
- (plugin_done = dlsym(lib_handle, "done")) == NULL) {
- err = dlerror() ? : "unknown error";
- fprintf(stderr,"lb_statistics: can't find symbol 'init' or 'done' (%s)\n", err);
- goto err;
- }
-
- /* dump file with events */
- if (file == NULL) { usage(argv[0]); goto err; }
- if ((f = fopen(file, "rt")) == NULL) {
- fprintf(stderr,"lb_statistics: Error: %s\n", strerror(errno));
- goto err;
- }
-
- /* use the plugin */
- plugin_init(jpctx, &plugin_data);
- plugin_data.ops.open(jpctx, f, "uri://", &data_handle);
-
- if (data_handle) {
- /* header */
- fprintf(stdout,"<?xml version=\"1.0\"?>\n\n");
- fprintf(stdout,"<lbd:jobRecord\n");
- fprintf(stdout,"\txmlns:lbd=\"http://glite.org/wsdl/types/lbdump\"\n");
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_jobId, &attrval);
- if (attrval) {
- fprintf(stdout,"\tjobid=\"%s\"\n", attrval->value);
- free_attrs(attrval);
- } else {
- fprintf(stdout,"\tjobid=\"default\"\n");
- }
- fprintf(stdout,">\n");
- /* /header */
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_user, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<user>%s</user>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_aTag, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<aTag>%s</aTag>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_rQType, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<rQType>%s</rQType>\n", ctime(&attrval->timestamp));
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_eDuration, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<eDuration>%s</eDuration>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_eNodes, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<eNodes>%s</eNodes>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_eProc, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<eProc>%s</eProc>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_RB, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<RB>%s</RB>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_CE, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<CE>%s</CE>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_host, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<host>%s</host>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_UIHost, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<UIHost>%s</UIHost>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_CPUTime, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<CPUTime>%s</CPUTime>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_NProc, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<NProc>%s</NProc>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_finalStatus, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<finalStatus>%s</finalStatus>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_finalStatusDate, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<finalStatusDate>%s</finalStatusDate>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_finalStatusReason, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<finalStatusReason>%s</finalStatusReason>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_LRMSDoneStatus, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<LRMSDoneStatus>%s</LRMSDoneStatus>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_LRMSStatusReason, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<LRMSStatusReason>%s</LRMSStatusReason>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_retryCount, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<retryCount>%s</retryCount>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_additionalReason, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<additionalReason>%s</additionalReason>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_jobType, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<jobType>%s</jobType>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_nsubjobs, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<nsubjobs>%s</nsubjobs>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_lastStatusHistory, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<lastStatusHistory>%s</lastStatusHistory>\n", attrval->value);
- free_attrs(attrval);
- }
-
- plugin_data.ops.attr(jpctx, data_handle, GLITE_JP_LB_fullStatusHistory, &attrval);
- if (attrval) {
- fprintf(stdout,"\t<fullStatusHistory>%s</fullStatusHistory>\n", attrval->value);
- free_attrs(attrval);
- }
-
- fprintf(stdout,"</lbd:jobRecord>\n");
-
- plugin_data.ops.close(jpctx, data_handle);
- }
- plugin_done(jpctx, &plugin_data);
-
- fclose(f);
- dlclose(lib_handle);
- return 0;
-
-err:
- dlclose(lib_handle);
- return 1;
-}
+++ /dev/null
-.project
-.cdtproject
\ No newline at end of file
+++ /dev/null
-LICENSE file for EGEE Middleware
-================================
-
-Copyright (c) 2004 on behalf of the EU EGEE Project:
-The European Organization for Nuclear Research (CERN),
-Istituto Nazionale di Fisica Nucleare (INFN), Italy
-Datamat Spa, Italy
-Centre National de la Recherche Scientifique (CNRS), France
-CS Systeme d'Information (CSSI), France
-Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
-Universiteit van Amsterdam (UvA), Netherlands
-University of Helsinki (UH.HIP), Finland
-University of Bergen (UiB), Norway
-Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-
-3. The end-user documentation included with the redistribution, if
-any, must include the following acknowledgment: "This product includes
-software developed by The EU EGEE Project (http://cern.ch/eu-egee/)."
-Alternatively, this acknowledgment may appear in the software itself, if
-and wherever such third-party acknowledgments normally appear.
-
-4. The names EGEE and the EU EGEE Project must not be
-used to endorse or promote products derived from this software without
-prior written permission. For written permission, please contact
-<email address>.
-
-5. You are under no obligation whatsoever to provide anyone with any
-bug fixes, patches, or upgrades to the features, functionality or
-performance of the Software ("Enhancements") that you may develop over
-time; however, if you choose to provide your Enhancements to The EU
-EGEE Project, or if you choose to otherwise publish or distribute your
-Enhancements, in source code form without contemporaneously requiring
-end users of The EU EGEE Project to enter into a separate written license
-agreement for such Enhancements, then you hereby grant The EU EGEE Project
-a non-exclusive, royalty-free perpetual license to install, use, copy,
-modify, prepare derivative works, incorporate into the EGEE Middleware
-or any other computer software, distribute, and sublicense your
-Enhancements or derivative works thereof, in binary and source code
-form (if any), whether developed by The EU EGEE Project or third parties.
-
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL PROJECT OR ITS CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-This software consists of voluntary contributions made by many
-individuals on behalf of the EU EGEE Project. For more information on The
-EU EGEE Project, please see http://cern.ch/eu-egee/. For more information on
-EGEE Middleware, please see http://egee-jra1.web.cern.ch/egee-jra1/
-
-
+++ /dev/null
-# Default values
-top_srcdir=.
-builddir=build
-top_builddir=${top_srcdir}/${builddir}
-stagedir=.
-distdir=.
-globalprefix=glite
-lbprefix=lb
-package=glite-lb-ws-interface
-version=0.0.0
-PREFIX=/opt/glite
-
--include Makefile.inc
--include ../project/version.properties
-
-version=${module.version}
-
-SUFFIXES = .T
-
-VPATH=${top_srcdir}/src
-AT3=perl -I${top_srcdir}/project ${top_srcdir}/project/at3
-XSLTPROC:=xsltproc --nonet
-TIDY=tidy
-XMLLINT:=xmllint --nonet
-docbookxls:=http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl
-
-STAGETO=interface
-
-WSDL=LB.wsdl LBTypes.wsdl
-
-all compile: ${WSDL} LB.html
-
-check:
- @echo No unit test required for interface-only module.
-
-stage: compile
- $(MAKE) install PREFIX=${stagedir} DOSTAGE=yes
-
-dist: distsrc distbin
-
-distsrc:
- mkdir -p ${top_srcdir}/${package}-${version}
- cd ${top_srcdir} && GLOBIGNORE="${package}-${version}" && cp -Rf * ${package}-${version}
- cd ${top_srcdir} && tar -czf ${distdir}/${package}-${version}_src.tar.gz --exclude-from=project/tar_exclude ${package}-${version}
- rm -rf ${top_srcdir}/${package}-${version}
-
-distbin:
- $(MAKE) install PREFIX=${top_srcdir}/tmpbuilddir
- cd ${top_srcdir}/tmpbuilddir && tar -czf ${top_srcdir}/${distdir}/${package}-${version}_bin.tar.gz *
- rm -rf ${top_srcdir}/tmpbuilddir
-
-install:
- -mkdir -p ${PREFIX}/${STAGETO}
- -mkdir -p ${PREFIX}/share/doc/${package}-${version}
- install -m 644 ${top_srcdir}/LICENSE ${PREFIX}/share/doc/${package}-${version}
- install -m 644 LB.html ${PREFIX}/share/doc/${package}-${version}
-# install the generated stuff instead
-# cd ${top_srcdir}/interface && install -m 644 ${WSDL} ${PREFIX}/${STAGETO}
- install -m 644 ${WSDL} ${PREFIX}/${STAGETO}
-
-# JP has its own version anyway
-# if [ x${DOSTAGE} = xyes ]; then \
-# mkdir -p ${PREFIX}/share/lb; \
-# install -m 644 ${top_srcdir}/src/puke-wsdl.xsl ${top_srcdir}/src/puke-ug.xsl ${PREFIX}/share/lb; \
-# fi
-
-clean:
- rm -f *.h
-
-
-%.xml: %.xml.T
- rm -f $@
- ${AT3} $< >$@ || rm -f $@
- chmod -w $@ >/dev/null
-
-${WSDL}: %.wsdl: %.xml puke-wsdl.xsl
- ${XSLTPROC} ../src/puke-wsdl.xsl $< >$@
- -${TIDY} -wrap 10000 -xml -m -i -q $@
- -perl -i -n -e 'if (/^\s*$$/) { $$empty .= "\n"; } elsif (/^\s*<(xsd:)?(enumeration|element|input|output|fault)/) { print $$_; $$empty = "";} else { print "$$empty$$_"; $$empty=""; }; ' $@
-
-LB.html: doc.xml LBTypes.xml LB.xml puke-ug.xsl
- ${XSLTPROC} --novalid ../src/puke-ug.xsl $< >doc-html.xml
- ${XMLLINT} --valid --noout doc-html.xml
- ${XSLTPROC} --stringparam chapter.autolabel 0 ${docbookxls} doc-html.xml >$@
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Build file for the GLite lb ws-interface component
-
- Authors: Joachim Flammer <Joachim.Flammer@Cern.ch>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
- Revision 1.2 2005/02/17 15:40:48 akrenek
- First attempt to generate LB.wsdl from LB's internal .T structure definitions
- - only JobStatus operation implemented so far
- - passes through wsdl2h
- - untested otherwise
- - breaks LB server compilation (obviously)
- - install & stage still uses hand-written LB.wsdl
-
- Revision 1.1.1.1 2004/11/25 15:20:10 akrenek
- initial import
-
- Revision 1.5 2004/07/20 16:08:30 flammer
- Changed incorrect my_... instead of .._template entries for subsystem and component.
-
- Revision 1.4 2004/07/16 16:32:53 flammer
- Added comment where to add language target.
-
- Revision 1.3 2004/07/16 14:56:55 flammer
- Corrected input path of build.properties.
-
- Revision 1.2 2004/07/06 20:43:19 flammer
- Update of configure & targets.
-
- Revision 1.1.1.1 2004/06/18 12:40:17 flammer
- Added general component template.
-
-
--->
-
-<project name="ws-interface" default="dist">
-
- <!-- ==============================================
- Builds the GLite lb ws-interface component
- ============================================== -->
-
- <!-- =========================================
- Import properties (order is important)
- ========================================= -->
-
- <!-- Import baseline properties & user properties -->
- <import file="../org.glite/project/baseline.properties.xml" />
-
- <!-- import component build properties,
- component properties &
- component common properties -->
- <import file="./project/properties.xml"/>
-
- <!-- import subsystem build properties,
- subsystem properties &
- subsystem common properties -->
- <import file="${subsystem.properties.file}"/>
-
- <!-- import global build properties &
- global properties -->
- <import file="${global.properties.file}" />
-
- <!-- =========================================
- Load dependency property files (order is important)
- ========================================= -->
- <property file="${user.dependencies.file}"/>
- <property file="${component.dependencies.file}" />
- <property file="${subsystem.dependencies.file}" />
- <property file="${global.dependencies.file}"/>
-
- <!-- =========================================
- Load configuration definitions (order is important)
- ========================================= -->
- <import file="${global.configure.options.file}"/>
- <import file="${component.configure.options.file}"/>
-
- <!-- =========================================
- Import task definitions (order is important)
- ========================================= -->
- <import file="${subsystem.taskdefs.file}" />
- <import file="${global.taskdefs.file}" />
-
- <!-- =========================================
- Load common targets
- ========================================= -->
- <!-- Put your language target (java/c++-ant/c++-autotool/perl) here -->
- <import file="${global.targets-simple_make.file}" />
-
- <!-- =========================================
- Load version file
- ========================================= -->
- <property file="${module.version.file}"/>
- <property file="${module.build.file}"/>
-
- <!-- ==============================================
- Local private targets
- ============================================== -->
-
- <target name="localinit"
- description="Module specific initialization tasks">
- <copy toDir="${module.project.dir}">
- <fileset dir="${subsystem.project.dir}">
- <include name="at3" />
- <include name="*.T" />
- <include name="*.pm" />
- </fileset>
- </copy>
- <antcall target="lbmakefiles" />
- </target>
-
- <target name="localcompile"
- description="Module specific compile tasks">
- </target>
-
- <target name="localclean"
- description="Module specific cleaning tasks">
- </target>
-
-</project>
+++ /dev/null
-#Mon Apr 03 07:43:34 CEST 2006
-module.build=0143
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Configuration build properties file for the GLite lb ws-interface component
-
- Authors: Joachim Flammer <Joachim.Flammer@cern.ch>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
- Revision 1.3 2004/07/20 16:08:30 flammer
- Changed incorrect my_... instead of .._template entries for subsystem and component.
-
- Revision 1.2 2004/07/16 14:56:55 flammer
- Corrected input path of build.properties.
-
- Revision 1.1 2004/07/06 20:43:19 flammer
- Update of configure & targets.
-
-
-
--->
-<project name="lb ws-interface component configuration properties">
- <target name="lbmakefiles">
- <exec executable="ln" failonerror="true">
- <arg line="-fs ${component.dir}/Makefile ${module.build.dir}/Makefile"/>
- </exec>
- <echo file="${module.build.dir}/Makefile.inc">
-top_srcdir=..
-builddir=build
-stagedir=${stage.abs.dir}
-distdir=${dist.dir}
-globalprefix=${global.prefix}
-lbprefix=${subsystem.prefix}
-package=${module.package.name}
-PREFIX=${install.dir}
-version=${module.version}
-glite_location=${with.glite.location}
- </echo>
- </target>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Common build properties file for the GLite lb ws-interface component
-
- Authors: Joachim Flammer <Joachim.Flammer@cern.ch>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
- Revision 1.4 2004/07/20 16:08:30 flammer
- Changed incorrect my_... instead of .._template entries for subsystem and component.
-
- Revision 1.3 2004/07/16 14:56:55 flammer
- Corrected input path of build.properties.
-
- Revision 1.2 2004/07/16 14:36:49 flammer
-
- Corrected build.properties include.
-
- Revision 1.1.1.1 2004/06/18 12:40:17 flammer
- Added general component template.
-
-
--->
-
-<project name="lb ws-interface component common properties">
-
- <!-- Include build properties to allow overwriting
- of properties for subsystem -->
- <property file="project/build.properties" />
-
- <!-- ======================================================
- Define corresponding subsystem properties
- ====================================================== -->
-
- <!-- Subsystem name -->
- <property name="subsystem.name" value="${lb.subsystem.name}"/>
-
- <!-- Subsystem prefix -->
- <property name="subsystem.prefix" value="${lb.subsystem.prefix}"/>
-
- <!-- ======================================================
- Define component properties
- ====================================================== -->
-
- <!-- Component name prefix -->
- <property name="component.prefix" value="ws-interface" />
-
- <!-- ======================================================
- Define general component properties
- ====================================================== -->
-
- <import file="${component.general.properties.file}" />
-
- <!-- ======================================================
- Define extra properties here ...
- ====================================================== -->
-
-
-</project>
+++ /dev/null
-#Fri Sep 02 14:16:49 CEST 2005
-# glite-lb-ws-interface_branch_3_0_0_RC15 tag is taken!
-module.version=2.2.0
-module.age=0
+++ /dev/null
-<?xml version="1.0"?>
-
-<service name="LoggingAndBookkeeping"
- ns="http://glite.org/wsdl/services/lb"
- prefix="lb"
- typeNs="http://glite.org/wsdl/types/lb"
- typePrefix="lbt"
- elemNs="http://glite.org/wsdl/elements/lb"
- elemPrefix="lbe">
-
- <version>CVS revision: <![CDATA[ $Header$ ]]> </version>
-
- <doc>
- <para>The Logging and Bookkeeping (L&B) service keeps track of grid jobs.</para>
-
- <para>The service receives job events
- from various components of the Workload Management System, stores
- them, and then derives the corresponding job states.</para>
-
- <para>Job information is fed into LB through a logging interface
- (<ulink url="https://edms.cern.ch/file/571273/1/LB-guide.pdf">legacy C and shell API</ulink>)
- and is not yet exposed as a web service.</para>
-
- <para>The users may query job states or retrieve LB events either via the same C/C++
- interface or via this web-service interface.</para>
-
- <para>Besides actively querying the job state, the user may also register to
- receive notifications on particular job state changes.
- This functionality is supported only in the legacy C/C++ interface.</para>
- </doc>
-
- <import namespace="http://glite.org/wsdl/services/lb" location="LBTypes.wsdl"/>
-
- <fault name="genericFault"/>
-
- <operations>
- <op name="GetVersion">
- Return version of the service.
- <output name="version" type="xsd:string">Returned version.</output>
- <fault name="genericFault" type="genericFault"> Any error. </fault>
- </op>
- <op name="JobStatus">
- Query state of a single job.
- <input name="jobid" type="xsd:string"> Id of the queried job. </input>
- <input name="flags" type="jobFlags"> Which data fields to retrieve. </input>
- <output name="stat" type="jobStatus"> Current state of the job. </output>
- <fault name="genericFault" type="genericFault"> Any error. </fault>
- </op>
- <op name="UserJobs">
- Simplified query, return all jobs of the authenticated user.
- <output name="jobs" type="xsd:string" list="yes"> JobId's of jobs matching the query.</output>
- <output name="states" type="jobStatus" list="yes"> States of jobs matching the query.</output>
- <fault name="genericFault" type="genericFault"> Any error. </fault>
- </op>
- <op name="QueryJobs">
- Retrieve a list of jobs, including their states, based on query conditions.
- The conditions take the form of a list of lists.
- Elements of each inner list refer to a single job attribute; these conditions are OR-ed.
- Elements of the outer list may refer to different job attributes; they are AND-ed.
-
- <input name="conditions" type="queryConditions" list="yes">The query conditions.</input>
- <input name="flags" type="jobFlags"> Which data fields to retrieve.</input>
- <output name="jobs" type="xsd:string" list="yes"> JobId's of jobs matching the query.</output>
- <output name="states" type="jobStatus" list="yes"> States of jobs matching the query.</output>
- <fault name="genericFault" type="genericFault"> Any error. </fault>
- </op>
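[Editor's note: illustrative mapping, not part of the interface description.] The and-ed/or-ed list-of-lists described for QueryJobs mirrors the legacy C consumer API, where the outer list is a NULL-terminated array of edg_wll_QueryRec arrays and each inner array ends with an EDG_WLL_QUERY_ATTR_UNDEF record. A hedged sketch, assuming the extended call edg_wll_QueryJobsExt() and an already initialized context ctx; the owner DN and the chosen states are arbitrary examples:

	/* owner == <DN>  AND  (status == DONE  OR  status == ABORTED) */
	edg_wll_QueryRec owner[2], status[3];
	const edg_wll_QueryRec *conds[3] = { owner, status, NULL };
	edg_wlc_JobId *jobsOut = NULL;
	edg_wll_JobStat *statesOut = NULL;

	memset(owner, 0, sizeof owner);
	memset(status, 0, sizeof status);

	owner[0].attr = EDG_WLL_QUERY_ATTR_OWNER;
	owner[0].op = EDG_WLL_QUERY_OP_EQUAL;
	owner[0].value.c = (char *) "/O=Grid/CN=Some User";
	owner[1].attr = EDG_WLL_QUERY_ATTR_UNDEF;

	status[0].attr = EDG_WLL_QUERY_ATTR_STATUS;
	status[0].op = EDG_WLL_QUERY_OP_EQUAL;
	status[0].value.i = EDG_WLL_JOB_DONE;
	status[1].attr = EDG_WLL_QUERY_ATTR_STATUS;
	status[1].op = EDG_WLL_QUERY_OP_EQUAL;
	status[1].value.i = EDG_WLL_JOB_ABORTED;
	status[2].attr = EDG_WLL_QUERY_ATTR_UNDEF;

	/* jobs and their states come back as two parallel lists,
	 * matching the 'jobs' and 'states' outputs of QueryJobs */
	edg_wll_QueryJobsExt(ctx, conds, 0, &jobsOut, &statesOut);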
-
- <op name="QueryEvents">
- Retrieve events matching a query.
- There are two sets of conditions, on jobs and on individual events, both have to be satisfied.
- Both query conditions have the same format as in QueryJobs.
-
- <input name="jobConditions" type="queryConditions" list="yes">
- Conditions on jobs.
- </input>
- <input name="eventConditions" type="queryConditions" list="yes">
- Conditions on individual events.
- </input>
- <output name="events" type="event" list="yes">All matchin events.</output>
- <fault name="genericFault" type="genericFault"> Any error. </fault>
- </op>
-
- <!-- TODO: event queries -->
-
- </operations>
-
-</service>
+++ /dev/null
-<?xml version="1.0"?>
-
-@@@LANG: wsdl
-<!--
-@@@AUTO
--->
-
-<service name="LoggingAndBookkeeping"
- ns="http://glite.org/wsdl/services/lb"
- typePrefix="lbt">
-
- <version>CVS revision: <![CDATA[ $Header$ ]]></version>
-
- <types ns="http://glite.org/wsdl/types/lb">
- <flags name="jobFlags">
- Flags determining which fields of job status should be retrieved.
- Can be combined arbitrarily.
- <val name="CLASSADS">Include also long job descriptions (JDL).</val>
- <val name="CHILDREN">Return list of subjobs of a DAG.</val>
- <val name="CHILDSTAT">Return state of the subjobs, i.e. apply other flags recursively </val>
- </flags>
-
- <enum name="statName">
-@@@{
- for my $stat ($status->getTypesOrdered) {
- my $u = uc $stat;
- my $c = getTypeComment $status $stat;
- gen qq{
-! <val name="$u">$c</val>
-};
- }
-@@@}
- </enum>
-
-@@@{
- for my $n ($status->getAllFieldsOrdered) {
- my $f = selectField $status $n;
- if ($f->{codes}) {
- my $n = getName $f;
- $n = $1.ucfirst $2 while $n =~ /([[:alpha:]]*)_([[:alpha:]_]*)/;
- gen qq{
-! <enum name="$n">
-};
- for (@{$f->{codes}}) {
- my $uc = uc $_->{name};
- gen qq{
-! <val name="$uc">$_->{comment}</val>
-};
- }
- gen qq{
-! </enum>
-};
- }
- }
-@@@}
-
- <struct name="jobStatus">
- Status of a job, possibly including subjobs.
- <elem name="state" type="statName">Status name.</elem>
-@@@{
- for my $n (getAllFieldsOrdered $status) {
- selectField $status $n;
- my $f = getField $status;
- my $name = getName $f;
- $name = $1.ucfirst $2 while $name =~ /([[:alpha:]]*)_([[:alpha:]_]*)/;
- my $type = $f->{type};
- my $list = 'no';
-
- if ($main::baseTypes{$type}) {
- $type = eval $main::types{wsdl}->{$main::baseTypes{$type}};
- $list = 'yes'
- }
- elsif ($f->{codes}) {
- $type = $name;
- }
- else {
- $type = getType $f;
- }
-
- my $comment = getComment $f;
- if ($name eq 'stateEnterTimes' || $name eq 'childrenHist') {
-# XXX: stateEnterTimes and childrenHist are the only cases of enum-indexed arrays
- gen qq{
-! <elem name="$name" type="${name}Item" list="yes" optional="yes">$comment</elem>
-};
- }
- else {
-# XXX: currently nothing is "optional" as we don't know from status.T
- gen qq{
-! <elem name="$name" type="$type" list="$list" optional="no">$comment</elem>
-};
- }
- }
-@@@}
- </struct>
-
- <choice name="event">
-@@@{
- for my $name (sort { $event->{order}->{$a} <=> $event->{order}->{$b} } getTypes $event) {
- my $comment = getTypeComment $event $name;
- $name = $1.ucfirst $2 while $name =~ /([[:alpha:]]*)_([[:alpha:]_]*)/;
- gen qq{
-! <elem name="$name" type="event$name">$comment</elem>
-};
- }
-@@@}
- </choice>
-
-@@@{
- selectType $event '_common_';
- for ($event->getFieldsOrdered) {
- my $f = selectField $event $_;
- if ($f->{codes}) {
- my $n = ucfirst getName $f;
- $n = $1.ucfirst $2 while $n =~ /([[:alpha:]]*)_([[:alpha:]_]*)/;
- gen qq{
-! <enum name="$n">
-};
- for (@{$f->{codes}}) {
- my $uc = uc $_->{name};
- gen qq{
-! <val name="$uc">$_->{comment}</val>
-};
- }
- gen qq{
-! </enum>
-!
-};
- }
- }
-
- for my $type (sort { $event->{order}->{$a} <=> $event->{order}->{$b} } getTypes $event) {
- my $tn = $type;
- $tn = $1.ucfirst $2 while $tn =~ /([[:alpha:]]*)_([[:alpha:]_]*)/;
-
- selectType $event $type;
- for ($event->getFieldsOrdered) {
- my $f = selectField $event $_;
- if ($f->{codes}) {
- my $n = $tn . '_' . getName $f;
- $n = $1.ucfirst $2 while $n =~ /([[:alpha:]]*)_([[:alpha:]_]*)/;
- gen qq{
-! <enum name="$n">
-};
- for (@{$f->{codes}}) {
- my $uc = uc $_->{name};
- gen qq{
-! <val name="$uc">$_->{comment}</val>
-};
- }
- gen qq{
-! </enum>
-!
-};
- }
- }
-
- gen qq{
-! <struct name="event$tn">
-};
- selectType $event '_common_';
- for ($event->getFieldsOrdered) {
- my $f = selectField $event $_;
- my $fn = $f->{name};
- my $ftn = $f->getType;
-
- $fn = $1.ucfirst $2 while $fn =~ /([[:alpha:]]*)_([[:alpha:]_]*)/;
- $ftn = ucfirst $fn if ($f->{codes});
- $ftn = $1.ucfirst $2 while $ftn =~ /([[:alpha:]]*)_([[:alpha:]_]*)/;
-
- my $comment = getComment $f;
- my $opt = $f->{optional} ? 'yes' : 'no';
- gen qq{\t\t\t\t<elem name="$fn" type="$ftn" optional="$opt">$comment</elem>
-};
- }
-
- selectType $event $type;
- for ($event->getFieldsOrdered) {
- my $f = selectField $event $_;
- my $fn = $f->{name};
- my $ftn = $f->getType;
-
-
- $fn = $1.ucfirst $2 while $fn =~ /([[:alpha:]]*)_([[:alpha:]_]*)/;
- $ftn = $tn . '_' . $fn if ($f->{codes});
- $ftn = $1.ucfirst $2 while $ftn =~ /([[:alpha:]]*)_([[:alpha:]_]*)/;
-
-
- my $comment = getComment $f;
- my $opt = $f->{optional} ? 'yes' : 'no';
- gen qq{\t\t\t\t<elem name="$fn" type="$ftn" optional="$opt">$comment</elem>
-};
- }
- gen "</struct>\n\n";
- }
-@@@}
-
- <enum name="eventSource">
- Possible sources of LB events.
- <val name="UserInterface"/>
- <val name="NetworkServer"/>
- <val name="WorkloadManager"/>
- <val name="BigHelper"/>
- <val name="JobSubmission"/>
- <val name="LogMonitor"/>
- <val name="LRMS"/>
- <val name="Application"/>
- </enum>
-
- <struct name="stateEnterTimesItem">
- <elem name="state" type="statName">The job state.</elem>
- <elem name="time" type="xsd:dateTime">When the state was entered last time.</elem>
- </struct>
-
- <struct name="childrenHistItem">
- <elem name="state" type="statName">The job state of the children.</elem>
- <elem name="count" type="xsd:int">How many children are in this state.</elem>
- </struct>
-
- <struct name="tagValue">
- <elem name="tag" type="xsd:string"/>
- <elem name="value" type="xsd:string" optional="yes"/>
- </struct>
-
- <struct name="timeval">
- <elem name="tvSec" type="xsd:long"/>
- <elem name="tvUsec" type="xsd:long"/>
- </struct>
-
-
- <struct name="genericFault" fault="yes">
- Generic SOAP fault, used to deliver any LB errors.
- <elem name="source" type="xsd:string">Source component (module) of the error.</elem>
- <elem name="code" type="xsd:int">Numeric error code.</elem>
- <elem name="text" type="xsd:string">Error text corresponding to the code.</elem>
- <elem name="description" type="xsd:string" optional="yes">Additional description of the error (e.g. filename)</elem>
- <elem name="reason" type="genericFault" optional="yes">Reason of the error, coming from lower levels.</elem>
- </struct>
-
- <struct name="queryConditions">
- An element of outer list of query conditions in job queries.
- It expresses possibly several conditions (records) on a single job attribute.
- These conditions are logically OR-ed.
- <elem name="attr" type="queryAttr"> The job attribute to which the query conditions apply. </elem>
- <elem name="tagName" type="xsd:string" optional="yes"> Name of the queried user tag if attr is USERTAG.</elem>
- <elem name="statName" type="statName" optional="yes"> Name of the job state to which "attr = TIME" condition refers.</elem>
- <elem name="record" type="queryRecord" list="yes"> The conditions. </elem>
- </struct>
-
- <enum name="queryAttr">
- Specification of a job attribute in query.
- <val name="JOBID">A concrete JobId</val>
- <val name="OWNER">Owner of the job (X509 certificate subject).</val>
- <val name="STATUS">Status of the job (see statName type).</val>
- <val name="LOCATION">Where the job is currently handled (hostname).</val>
- <val name="DESTINATION">Where the job is or was scheduled to be executed.</val>
- <val name="DONECODE">How the job terminated (see doneCode type)</val>
- <val name="USERTAG">Value of particular user tag. The tag name has to be specified in queryConditions.tagName.</val>
- <val name="TIME">When the job entered a particular state. The state has to be specified in queryCondition.statName.</val>
- <val name="RESUBMITTED">The job was resubmitted.</val>
- <val name="PARENT">JobId of the job parend (DAG).</val>
- <val name="EXITCODE">UNIX exit code of the job.</val>
- <val name="HOST">Where the event was generated.</val>
- <val name="SOURCE">Source component.</val>
- <val name="INSTANCE">Instance of the source component.</val>
- <val name="EVENTTYPE">Event type.</val>
- <val name="CHKPTTAG">Checkpoint tag.</val>
- </enum>
-
- <struct name="queryRecord">
- A single query condition.
- <elem name="op" type="queryOp"> Relational operator of the condition.</elem>
- <elem name="value1" type="queryRecValue"> Value to compare the attribute with.</elem>
- <elem name="value2" type="queryRecValue" optional="yes"> Another value to compare the attribute with (op = WITHIN only).</elem>
- </struct>
-
- <enum name="queryOp">
- Relational operator of query conditions.
- <val name="EQUAL"> Attribute is equal to the specified value </val>
- <val name="LESS"> Attribute is less than the specified value or equal </val>
- <val name="GREATER"> Attribute is greater than the specified value or equal </val>
- <val name="WITHIN"> Attribute is withing a range (queryRecord.value2 must be specified) </val>
- <val name="UNEQUAL"> Attribute is not equal to the specified value.</val>
- </enum>
-
- <choice name="queryRecValue">
- A value to compare an attribute with in queries.
- Exactly one of the elements must be specified.
- <elem name="i" type="xsd:int">Integer.</elem>
- <elem name="c" type="xsd:string">String.</elem>
- <elem name="t" type="timeval">Timestamp.</elem>
- </choice>
-
-
- </types>
-
-</service>
+++ /dev/null
-<?xml version="1.0"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.4//EN"
- "http://www.oasis-open.org/docbook/xml/4.4/docbookx.dtd">
-<book>
-</book>
+++ /dev/null
-<?xml version="1.0"?>
-
-<!-- $Header$ -->
-
-<xsl:stylesheet version="1.0"
- xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
-
-<xsl:output indent="yes" doctype-public="-//OASIS//DTD DocBook XML V4.2//EN"
- doctype-system="http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"/>
-
-<xsl:template match="book">
- <chapter>
- <title><xsl:value-of select="document('LB.xml')/service/@name"/></title>
-
- <sect1>
- <title>Overview</title>
- <xsl:copy-of select="document('LB.xml')/service/doc/*"/>
- </sect1>
-
- <sect1>
- <title>Operations</title>
- <para> <emphasis><xsl:value-of select="document('LB.xml')/service/version"/></emphasis> </para>
- <!-- xsl:apply-templates select="operations/op" -->
- <xsl:apply-templates select="document('LB.xml')/service/operations/op">
- <xsl:sort select="@name"/>
- </xsl:apply-templates>
- </sect1>
-
- <sect1>
- <title>Types</title>
- <!--xsl:apply-templates select="types"/ -->
- <para> <emphasis><xsl:value-of select="document('../build/LBTypes.xml')/service/version"/></emphasis> </para>
- <xsl:apply-templates select="document('../build/LBTypes.xml')/service/types"/>
- </sect1>
- </chapter>
-</xsl:template>
-
-
-<xsl:template match="input|output|fault">
- <varlistentry>
- <term>
- <!--type-->
- <xsl:if test = "@list = 'yes'">list of </xsl:if>
- <xsl:choose>
- <xsl:when test="not(starts-with(@type,'xsd:'))">
- <link linkend="type:{@type}">
- <type><xsl:value-of select="@type "/> </type> </link>
- </xsl:when>
- <xsl:otherwise><type><xsl:value-of select="@type "/> </type> </xsl:otherwise>
- </xsl:choose>
- <!--/type-->
- <parameter> <xsl:value-of select=" @name"/></parameter>
- </term>
- <listitem>
- <simpara><xsl:value-of select="text()"/></simpara>
- </listitem>
- </varlistentry>
-</xsl:template>
-
-<xsl:template match="op" >
- <sect2 id="op:{@name}">
- <title><xsl:value-of select="@name"/></title>
- <para><xsl:value-of select="text()"/></para>
- <para>
- Inputs:
- <xsl:choose>
- <xsl:when test="count(./input)>0">
- <variablelist>
- <xsl:apply-templates select="./input"/>
- </variablelist>
- </xsl:when>
- <xsl:otherwise>N/A</xsl:otherwise>
- </xsl:choose>
- </para>
- <para>
- Outputs:
- <variablelist>
- <xsl:apply-templates select="./output"/>
- </variablelist>
- </para>
- </sect2>
-</xsl:template>
-
-<xsl:template match="types">
- <xsl:for-each select="flags|enum|struct|choice">
- <xsl:sort select="@name"/>
- <sect2 id="type:{@name}">
- <title> <xsl:value-of select="@name"/> </title>
- <para> <xsl:value-of select="text()"/> </para>
- <xsl:choose>
- <xsl:when test="name(.)='struct'">
- <para> <emphasis>Structure</emphasis> (sequence complex type in WSDL)</para>
- <para> Fields: ( <type>type </type> <structfield>name</structfield> description )</para>
- </xsl:when>
- <xsl:when test="name(.)='choice'">
- <para> <emphasis>Union</emphasis> (choice complex type in WSDL)</para>
- <para> Fields: ( <type>type </type> <structfield>name</structfield> description )</para>
- </xsl:when>
- <xsl:when test="name(.)='enum'">
- <para> <emphasis>Enumeration</emphasis> (restriction of xsd:string in WSDL),
- exactly one of the values must be specified.
- </para>
- <para> Values: </para>
- </xsl:when>
- <xsl:when test="name(.)='flags'">
- <para> <emphasis>Flags</emphasis> (sequence of restricted xsd:string in WSDL),
- any number of values can be specified together.
- </para>
- <para> Values: </para>
- </xsl:when>
- </xsl:choose>
- <variablelist>
- <xsl:for-each select="elem|val">
- <varlistentry>
- <term>
- <xsl:choose>
- <xsl:when test="name(.)='elem'">
- <xsl:if test="@list = 'yes'">list of </xsl:if>
- <xsl:choose>
- <xsl:when test="@type!='string' and @type!='int' and not(starts-with(@type,'xsd:'))">
- <link linkend="type:{@type}">
- <type><xsl:value-of select="@type "/> </type>
- </link>
- </xsl:when>
- <xsl:otherwise><type><xsl:value-of select="@type "/></type></xsl:otherwise>
- </xsl:choose>
- <!-- <type><xsl:value-of select="@type"/></type> -->
- <xsl:value-of select="' '"/>
- <structfield><xsl:value-of select="@name"/></structfield>
- </xsl:when>
- <xsl:otherwise>
- <constant><xsl:value-of select="@name"/></constant>
- </xsl:otherwise>
- </xsl:choose>
- </term>
- <listitem>
- <simpara>
- <xsl:if test="@optional = 'yes'"> (optional) </xsl:if>
- <!-- <xsl:if test="@list = 'yes'"> (multiple occurrences) </xsl:if> -->
- <xsl:value-of select=" text()"/>
- </simpara>
- </listitem>
- </varlistentry>
- </xsl:for-each>
- </variablelist>
- </sect2>
- </xsl:for-each>
-</xsl:template>
-
-
-</xsl:stylesheet>
+++ /dev/null
-<?xml version="1.0"?>
-
-<xsl:stylesheet version="1.0"
- xmlns="http://schemas.xmlsoap.org/wsdl/"
- xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- xmlns:xsd="http://www.w3.org/2001/XMLSchema"
- xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
- xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
-
- xmlns:lb="http://glite.org/wsdl/services/lb"
- xmlns:lbe="http://glite.org/wsdl/elements/lb"
- xmlns:lbt="http://glite.org/wsdl/types/lb">
-
-<xsl:output indent="yes"/>
-
-<xsl:template match="/service">
- <definitions
- xmlns="http://schemas.xmlsoap.org/wsdl/"
- name="{@name}"
- targetNamespace="{@ns}">
- <documentation>
- <xsl:value-of select="version"/>
- <xsl:value-of select="text()"/>
- </documentation>
-
- <xsl:apply-templates select="import"/>
-
- <xsl:apply-templates select="types"/>
-
-<!-- <xsl:apply-templates select="fault"/> -->
-
- <xsl:apply-templates select="operations"/>
-
- </definitions>
-</xsl:template>
-
-<xsl:template match="types">
- <wsdl:types>
- <xsd:schema targetNamespace="{@ns}"
- elementFormDefault="unqualified"
- attributeFormDefault="unqualified">
-
- <xsl:apply-templates/>
- </xsd:schema>
- </wsdl:types>
- <!-- <xsl:apply-templates select="struct[@fault='yes']" mode="message"/> -->
-</xsl:template>
-
-<!--
-<xsl:template match="simple">
- <xsd:element name="{@name}" type="xsd:{@name}"/>
- <xsd:complexType name="{@name}List">
- <xsd:sequence>
- <xsd:element name="{@name}" type="xsd:{@name}" minOccurs="0" maxOccurs="unbounded"></xsd:element>
- </xsd:sequence>
- </xsd:complexType>
- <xsd:element name="{@name}List" type="{/service/@typePrefix}:{@name}List"/>
-</xsl:template>
-
-<xsl:template match="list">
- <xsd:complexType name="{@name}List">
- <xsd:sequence>
- <xsd:element name="{@name}" type="xsd:{@name}" minOccurs="0" maxOccurs="unbounded"></xsd:element>
- </xsd:sequence>
- </xsd:complexType>
-</xsl:template>
--->
-
-<xsl:template match="enum">
- <xsd:simpleType name="{@name}">
- <xsd:restriction base="xsd:string">
- <xsl:for-each select="val"><xsd:enumeration value="{@name}"/></xsl:for-each>
- </xsd:restriction>
- </xsd:simpleType>
-<!-- <xsd:element name="{@name}" type="{/service/@typePrefix}:{@name}"/> -->
-</xsl:template>
-
-<xsl:template match="flags">
- <xsd:simpleType name="{@name}Value">
- <xsd:restriction base="xsd:string">
- <xsl:for-each select="val"><xsd:enumeration value="{@name}"/></xsl:for-each>
- </xsd:restriction>
- </xsd:simpleType>
- <xsd:complexType name="{@name}">
- <xsd:sequence>
- <xsd:element name="flag" type="{/service/@typePrefix}:{@name}Value" minOccurs="0" maxOccurs="unbounded"/>
- </xsd:sequence>
- </xsd:complexType>
-<!-- <xsd:element name="{@name}" type="{/service/@typePrefix}:{@name}"/> -->
-</xsl:template>
-
-<xsl:template match="struct">
- <xsd:complexType name="{@name}">
- <xsd:sequence>
- <xsl:call-template name="inner-struct"/>
- </xsd:sequence>
- </xsd:complexType>
-</xsl:template>
-
-<xsl:template match="choice">
- <xsd:complexType name="{@name}">
- <xsd:choice>
- <xsl:call-template name="inner-struct"/>
- </xsd:choice>
- </xsd:complexType>
-</xsl:template>
-
-
-<xsl:template name="inner-struct">
- <xsl:variable name="nillable">
- <xsl:choose>
- <xsl:when test="local-name(.)='choice'">true</xsl:when>
- <xsl:otherwise>false</xsl:otherwise>
- </xsl:choose>
- </xsl:variable>
- <xsl:for-each select="elem">
- <xsl:variable name="type">
- <xsl:choose>
- <xsl:when test="contains(@type,':')">
- <xsl:value-of select="@type"/>
- </xsl:when>
- <xsl:otherwise>
- <xsl:value-of select="/service/@typePrefix"/>:<xsl:value-of select="@type"/>
- </xsl:otherwise>
- </xsl:choose>
- </xsl:variable>
- <xsl:variable name="min">
- <xsl:choose>
- <xsl:when test="@optional='yes'">0</xsl:when>
- <xsl:otherwise>1</xsl:otherwise>
- </xsl:choose>
- </xsl:variable>
- <xsl:variable name="max">
- <xsl:choose>
- <xsl:when test="@list='yes'">unbounded</xsl:when>
- <xsl:otherwise>1</xsl:otherwise>
- </xsl:choose>
- </xsl:variable>
- <xsd:element name="{@name}" type="{$type}" minOccurs="{$min}" maxOccurs="{$max}" nillable="{$nillable}"/>
- </xsl:for-each>
-<!--
- <xsd:complexType name="{@name}List">
- <xsd:sequence>
- <xsd:element name="{@name}" type="{/service/@typePrefix}:{@name}" minOccurs="0" maxOccurs="unbounded"></xsd:element>
- </xsd:sequence>
- </xsd:complexType>
- <xsd:element name="{@name}" type="{/service/@typePrefix}:{@name}"/>
- <xsd:element name="{@name}List" type="{/service/@typePrefix}:{@name}List"/>
--->
-</xsl:template>
-
-<xsl:template match="op" mode="message">
- <wsdl:message name="{@name}Request">
- <wsdl:part name="input" element="{/service/@elemPrefix}:{@name}">
- <wsdl:documentation><xsl:value-of select="text()"/></wsdl:documentation>
- </wsdl:part>
- </wsdl:message>
- <wsdl:message name="{@name}Response">
- <wsdl:part name="output" element="{/service/@elemPrefix}:{@name}Response">
- <wsdl:documentation><xsl:value-of select="text()"/></wsdl:documentation>
- </wsdl:part>
- </wsdl:message>
-</xsl:template>
-
-<xsl:template match="op" mode="element">
- <xsd:element name="{@name}">
- <xsd:complexType>
- <xsd:sequence>
- <xsl:for-each select="input">
- <xsl:variable name="prefix">
- <xsl:choose>
- <xsl:when test="starts-with(@type,'xsd:')"/>
- <xsl:otherwise><xsl:value-of select="/service/@typePrefix"/>:</xsl:otherwise>
- </xsl:choose>
- </xsl:variable>
- <xsl:variable name="max">
- <xsl:choose>
- <xsl:when test="@list='yes'">unbounded</xsl:when>
- <xsl:otherwise>1</xsl:otherwise>
- </xsl:choose>
- </xsl:variable>
- <xsd:element name="{@name}" type="{$prefix}{@type}" minOccurs="1" maxOccurs="{$max}"/>
- </xsl:for-each>
- </xsd:sequence>
- </xsd:complexType>
- </xsd:element>
- <xsd:element name="{@name}Response">
- <xsd:complexType>
- <xsd:sequence>
- <xsl:for-each select="output">
- <xsl:variable name="prefix">
- <xsl:choose>
- <xsl:when test="starts-with(@type,'xsd:')"/>
- <xsl:otherwise><xsl:value-of select="/service/@typePrefix"/>:</xsl:otherwise>
- </xsl:choose>
- </xsl:variable>
- <xsl:variable name="max">
- <xsl:choose>
- <xsl:when test="@list='yes'">unbounded</xsl:when>
- <xsl:otherwise>1</xsl:otherwise>
- </xsl:choose>
- </xsl:variable>
- <xsd:element name="{@name}" type="{$prefix}{@type}" minOccurs="1" maxOccurs="{$max}"/>
- </xsl:for-each>
- </xsd:sequence>
- </xsd:complexType>
- </xsd:element>
-</xsl:template>
-
-
-<xsl:template match="struct[@fault='yes']" mode="message">
- <wsdl:message name="{@name}">
- <wsdl:part name="{@name}" element="{/service/@typePrefix}:{@name}">
- <wsdl:documentation><xsl:value-of select="text()"/></wsdl:documentation>
- </wsdl:part>
- </wsdl:message>
-</xsl:template>
-
-<xsl:template match="op" mode="port-type">
- <wsdl:operation name="{@name}">
- <wsdl:documentation><xsl:value-of select="text()"/></wsdl:documentation>
- <wsdl:input name="i" message="{/service/@prefix}:{@name}Request"/>
- <wsdl:output name="o" message="{/service/@prefix}:{@name}Response"/>
- <wsdl:fault name="f" message="{/service/@prefix}:{fault/@name}"/>
- </wsdl:operation>
-</xsl:template>
-
-<xsl:template match="op" mode="binding">
- <wsdl:operation name="{@name}">
- <soap:operation style="document"/>
- <wsdl:input name="i">
- <soap:body use="literal"/>
- </wsdl:input>
- <wsdl:output name="o">
- <soap:body use="literal"/>
- </wsdl:output>
- <wsdl:fault name="f">
- <soap:fault name="f" use="literal"/>
- </wsdl:fault>
- </wsdl:operation>
-</xsl:template>
-
-<xsl:template match="import">
- <wsdl:import namespace="{@namespace}" location="{@location}"/>
-</xsl:template>
-
-<xsl:template match="operations">
- <wsdl:types>
- <xsd:schema targetNamespace="{/service/@elemNs}"
- elementFormDefault="unqualified"
- attributeFormDefault="unqualified">
-
- <xsl:apply-templates select="op" mode="element"/>
-
- <xsl:for-each select="/service/fault">
- <xsd:element name="{@name}" type="{/service/@typePrefix}:{@name}"/>
- </xsl:for-each>
- </xsd:schema>
- </wsdl:types>
-
- <xsl:apply-templates select="/service/fault"/>
-
- <xsl:apply-templates select="op" mode="message"/>
-
- <wsdl:portType name="{/service/@name}PortType">
- <xsl:apply-templates select="op" mode="port-type"/>
- </wsdl:portType>
-
- <binding name="{/service/@name}" type="{/service/@prefix}:{/service/@name}PortType">
- <soap:binding style="document" transport="http://schemas.xmlsoap.org/soap/http"/>
- <xsl:apply-templates select="op" mode="binding"/>
- </binding>
-
- <service name="{/service/@name}">
- <documentation><xsl:value-of select="text()"/></documentation>
- <port name="{/service/@name}" binding="{/service/@prefix}:{/service/@name}">
- <soap:address location="http://test.glite.org/{/service/@prefix}:8080"/>
- </port>
-
- </service>
-
-</xsl:template>
-
-<xsl:template match="fault">
- <wsdl:message name="{@name}">
- <wsdl:part name="{@name}" element="{/service/@elemPrefix}:{@name}" />
- </wsdl:message>
-</xsl:template>
-
-
-</xsl:stylesheet>
-
+++ /dev/null
-.project
-.cdtproject
\ No newline at end of file
+++ /dev/null
-LICENSE file for EGEE Middleware\r
-================================\r
-\r
-Copyright (c) 2004 on behalf of the EU EGEE Project: \r
-The European Organization for Nuclear Research (CERN), \r
-Istituto Nazionale di Fisica Nucleare (INFN), Italy\r
-Datamat Spa, Italy\r
-Centre National de la Recherche Scientifique (CNRS), France\r
-CS Systeme d'Information (CSSI), France\r
-Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden\r
-Universiteit van Amsterdam (UvA), Netherlands\r
-University of Helsinki (UH.HIP), Finland\r
-University of Bergen (UiB), Norway\r
-Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom\r
-\r
-Redistribution and use in source and binary forms, with or without\r
-modification, are permitted provided that the following conditions are\r
-met: \r
-\r
-1. Redistributions of source code must retain the above copyright\r
-notice, this list of conditions and the following disclaimer.\r
-\r
-2. Redistributions in binary form must reproduce the above copyright\r
-notice, this list of conditions and the following disclaimer in the\r
-documentation and/or other materials provided with the distribution.\r
-\r
-3. The end-user documentation included with the redistribution, if\r
-any, must include the following acknowledgment: "This product includes\r
-software developed by The EU EGEE Project (http://cern.ch/eu-egee/)."\r
-Alternatively, this acknowledgment may appear in the software itself, if\r
-and wherever such third-party acknowledgments normally appear.\r
-\r
-4. The names EGEE and the EU EGEE Project must not be\r
-used to endorse or promote products derived from this software without\r
-prior written permission. For written permission, please contact\r
-<email address>.\r
-\r
-5. You are under no obligation whatsoever to provide anyone with any\r
-bug fixes, patches, or upgrades to the features, functionality or\r
-performance of the Software ("Enhancements") that you may develop over\r
-time; however, if you choose to provide your Enhancements to The EU\r
-EGEE Project, or if you choose to otherwise publish or distribute your\r
-Enhancements, in source code form without contemporaneously requiring\r
-end users of The EU EGEE Project to enter into a separate written license\r
-agreement for such Enhancements, then you hereby grant The EU EGEE Project\r
-a non-exclusive, royalty-free perpetual license to install, use, copy,\r
-modify, prepare derivative works, incorporate into the EGEE Middleware\r
-or any other computer software, distribute, and sublicense your\r
-Enhancements or derivative works thereof, in binary and source code\r
-form (if any), whether developed by The EU EGEE Project or third parties.\r
-\r
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED\r
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\r
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r
-DISCLAIMED. IN NO EVENT SHALL PROJECT OR ITS CONTRIBUTORS BE LIABLE\r
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\r
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\r
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\r
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\r
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\r
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\r
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-\r
-This software consists of voluntary contributions made by many\r
-individuals on behalf of the EU EGEE Project. For more information on The\r
-EU EGEE Project, please see http://cern.ch/eu-egee/. For more information on\r
-EGEE Middleware, please see http://egee-jra1.web.cern.ch/egee-jra1/\r
-\r
-\r
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Build file for the GLite Middleware Logging and Bookkeeping Subsystem
-
- Authors: Ales Krenek <ljocha@ics.muni.cz>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
- Revision 1.42.4.1 2006/08/21 09:44:43 mmulac
- *** empty log message ***
-
- Revision 1.42 2006/03/24 08:26:11 akrenek
- missing utils->client dependency (should fix nightly build)
-
- Revision 1.41 2006/03/15 17:35:35 akrenek
- cares
-
- Revision 1.40 2006/03/15 17:33:24 akrenek
- merge of 1.5 branch
-
- Revision 1.39 2005/11/27 21:51:18 eronchie
- Applied fix for bug 13928
-
- Revision 1.38 2005/11/22 12:13:47 eronchie
- Removed one utils
-
- Revision 1.37 2005/11/04 14:39:20 akrenek
- merge in the "lbonly" hack
-
- Revision 1.36.4.6 2006/02/06 12:10:00 akrenek
- make proxy depend on logger explicitely
-
- Revision 1.36.4.5 2005/12/02 11:15:33 akrenek
- resolved logger dependency conflict
-
- Revision 1.36.4.4 2005/12/02 10:04:14 jpospi
- server depends on logger
-
- Revision 1.36.4.3 2005/11/28 10:39:08 akrenek
- merge bug #13928 fix on the pre_cares branch
-
- Revision 1.36.4.2 2005/11/24 12:59:36 akrenek
- server depends on bones
-
- Revision 1.36.4.1 2005/11/24 12:32:02 akrenek
- merged in "lbonly"
-
- Revision 1.39 2005/11/27 21:51:18 eronchie
- Applied fix for bug 13928
-
- Revision 1.38 2005/11/22 12:13:47 eronchie
- Removed one utils
-
- Revision 1.37 2005/11/04 14:39:20 akrenek
- merge in the "lbonly" hack
-
- Revision 1.36 2005/10/15 13:36:26 akrenek
- added lb.utils
-
- Revision 1.35 2005/10/14 17:19:08 akrenek
- build also lb.utils
-
- Revision 1.34 2005/10/14 11:02:06 akrenek
- depends on jp.primary
-
- Revision 1.33 2005/09/19 15:23:45 akrenek
- "The gigantic merge"; from release 1.4 branch to HEAD
-
- Revision 1.32.2.2 2005/10/31 12:45:10 akrenek
- conditional disabling of cross-subsystem builds via "lbonly" property
-
- Revision 1.32.2.1 2005/08/18 10:36:05 eronchie
- Added cross-subsystem dependencies feature in org.glite.lb subsystem.
- Added wms-utils.jobid and security.gsoap-plugin among common dependencies.
- Added security.voms among server dependencies.
- At the moment I have not consider gridsite.core component.
- Once built gridsiste component, run ant in org.glite.lb to build the whole lb
- and its external gLite dependencies.
-
- Revision 1.32 2005/08/03 11:58:25 akrenek
- Merged the release 1.0 branch
-
- Revision 1.31 2005/05/26 15:13:37 zurek
- inserted module.build.file
-
- Revision 1.30 2005/03/17 09:59:31 zsalvet
- Add proxy target.
-
- Revision 1.29 2005/03/16 10:02:52 zsalvet
- proxy component added
-
- Revision 1.28.2.1 2005/02/12 01:38:13 glbuild
- Changed start time
-
- Revision 1.28 2004/11/29 15:16:26 zsalvet
- Add ws-interface to checkout/build machinery.
-
- Revision 1.27 2004/11/29 13:55:02 akrenek
- added dependence on ws-interface
-
- Revision 1.26 2004/10/29 22:53:41 dimeglio
- Use envset target
-
- Revision 1.25 2004/10/28 22:57:36 dimeglio
- Quoted ant command line parameters
-
- Revision 1.24 2004/10/28 18:19:30 dimeglio
- Added envcheck to individual components
-
- Revision 1.23 2004/10/26 15:12:06 dimeglio
- Fixed spelling error in envchecks
-
- Revision 1.22 2004/10/26 13:11:13 dimeglio
- Added dependency of clean on envchecks
-
- Revision 1.21 2004/10/25 20:57:38 dimeglio
- Use antExec
-
- Revision 1.20 2004/10/21 10:37:48 akrenek
- make server depend on logger due to notification interlogger
-
- Revision 1.19 2004/10/15 12:30:01 akrenek
- build with lb.sever-bones
-
- Revision 1.18 2004/09/29 13:35:39 flammer
- Only update of line formatting.
-
- Revision 1.17 2004/09/09 21:05:29 dimeglio
- Moved tag target to targets-common
-
- Revision 1.16 2004/08/27 03:03:12 dimeglio
- Preserve original timestamp when copying file and overwrite when new
-
- Revision 1.15 2004/08/27 02:46:52 dimeglio
- Added logic to create the tagged dependencies properties file
-
- Revision 1.14 2004/08/26 23:48:26 dimeglio
- Added generation of cruisecontrol config file entry
-
- Revision 1.13 2004/08/09 04:56:14 dimeglio
- Replaced rtag with tag
-
- Revision 1.12 2004/08/09 04:36:44 dimeglio
- Fixed inheritance of do.cvs.tag property
-
- Revision 1.11 2004/08/09 03:06:11 dimeglio
- Fixed inheritance of failonerror property
-
- Revision 1.10 2004/08/09 02:35:35 dimeglio
- Set default value for failonerror
-
- Revision 1.9 2004/08/08 19:07:17 dimeglio
- Added small.memory type of build
-
- Revision 1.8 2004/07/29 10:47:46 dimeglio
- Added server and logger
-
- Revision 1.7 2004/07/29 07:39:57 dimeglio
- Removed local envinfo and envcheck target (must use common ones)
-
- Revision 1.6 2004/07/16 16:25:43 flammer
- Added tags for automated adding of components.
-
- Revision 1.5 2004/07/06 17:43:33 flammer
- Update of classpath definitions, targets & configure file.
-
- Revision 1.4 2004/06/23 00:24:03 dimeglio
- Added common and client modules
-
- Revision 1.3 2004/06/22 18:24:35 dimeglio
- Added client interface module
-
- Revision 1.2 2004/06/18 23:05:12 dimeglio
- Added/upgraded default build scripts
-
--->
-
-<project name="lb" default="dist">
-
- <description>
- Ant build file to build the GLite Logging and Bookkeeping Subsystem
- </description>
-
- <!-- =========================================
- Builds the GLite LB subsystem
- ========================================= -->
-
- <!-- Import baseline & user properties -->
- <import file="../org.glite/project/baseline.properties.xml" />
-
- <!-- Import subsystem build properties,
- subsystem properties &
- subsystem common properties -->
- <import file="./project/properties.xml" />
-
- <!-- Import global build properties and global properties -->
- <import file="${global.properties.file}" />
-
- <!-- =========================================
- Load dependencies properties files (order is important)
- ========================================= -->
- <property file="${user.dependencies.file}"/>
- <property file="${subsystem.dependencies.file}"/>
- <property file="${global.dependencies.file}"/>
-
- <!-- =========================================
- Load configure options
- ========================================= -->
- <import file="${global.configure.options.file}"/>
-
- <!-- =========================================
- Import global task definitions
- ========================================= -->
- <import file="${global.taskdefs.file}" />
-
- <!-- =========================================
- Import global compiler definitions
- ========================================= -->
- <import file="${global.compilerdefs.file}" />
-
- <!-- =========================================
- Import targets
- ========================================= -->
- <import file="${global.targets-common.file}"/>
-
- <!-- =========================================
- Load version file
- ========================================= -->
- <property file="${module.version.file}"/>
- <property file="${module.build.file}"/>
-
- <!-- ===============================================
- Public common targets
- =============================================== -->
-
- <target name="localinit" depends="envcheck">
-
- <echo> Preparing directories ... </echo>
-
- <mkdir dir="${stage.bin.dir}" />
- <mkdir dir="${stage.lib.dir}" />
- <mkdir dir="${stage.java.dir}" />
- <mkdir dir="${stage.inc.dir}" />
- <mkdir dir="${stage.int.dir}" />
-
- <mkdir dir="${dist.dir}" />
-
- </target>
-
- <target name="init" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="init"/>
- </antcall>
- </target>
-
- <target name="checkstyle" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="checkstyle"/>
- </antcall>
- </target>
-
- <target name="compile" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="compile"/>
- </antcall>
- </target>
-
- <target name="compiletest" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="compiletest"/>
- </antcall>
- </target>
-
- <target name="unittest" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="unittest"/>
- </antcall>
- </target>
-
- <target name="unitcoverage" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="unitcoverage"/>
- </antcall>
- </target>
-
- <target name="stage" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="stage"/>
- </antcall>
- </target>
-
- <target name="dist" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="dist"/>
- </antcall>
- </target>
-
- <target name="install" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="install"/>
- </antcall>
- </target>
-
- <target name="doc" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="doc"/>
- </antcall>
- </target>
-
- <target name="all" depends="localinit">
- <antcall target="buildmodules">
- <param name="target" value="all"/>
- </antcall>
- </target>
-
- <target name="clean" depends="envcheck">
-
- <property name="offline.repository" value="true" />
- <antcall target="buildmodules">
- <param name="target" value="clean"/>
- </antcall>
-
- <delete dir="${module.bin.dir}" />
- <delete dir="${module.lib.dir}" />
- <delete dir="${module.autosrc.dir}" />
- <delete dir="${module.autodoc.dir}" />
- <delete dir="${module.test.reports.dir}" />
-
- </target>
-
- <target name="cleanAll" depends="clean"/>
-
- <!-- ===============================================
- Private targets
- =============================================== -->
-
- <!-- ===============================================
- Modules proxy targets
- =============================================== -->
-
- <!-- component targets definitions tag = do not remove = -->
-
- <target name="security.gsoap-plugin" unless="lbonly" depends="envset">
- <if>
- <isset property="small.memory"/>
- <then>
- <exec dir="${security.subsystem.dir}" executable="${antExec}" failonerror="${failonerror}">
- <arg line="gsoap-plugin -Dtarget=${target} -Dsmall.memory=true -Dbootstrap=${bootstrap} -Dfailonerror=${failonerror} -Ddo.cvs.tag=${do.cvs.tag}"/>
- </exec>
- </then>
- <else>
- <ant dir="${security.subsystem.dir}"
- target="gsoap-plugin"
- inheritall="false" >
- <property name="target" value="${target}"/>
- </ant>
- </else>
- </if>
- </target>
-
- <target name="security.voms-api-c" unless="lbonly" depends="envset">
- <if>
- <isset property="small.memory"/>
- <then>
- <exec dir="${security.subsystem.dir}" executable="${antExec}" failonerror="${failonerror}">
- <arg line="voms-api-c -Dtarget=${target} -Dsmall.memory=true -Dbootstrap=${bootstrap} -Dfailonerror=${failonerror} -Ddo.cvs.tag=${do.cvs.tag}"/>
- </exec>
- </then>
- <else>
- <ant dir="${security.subsystem.dir}"
- target="voms-api-c"
- inheritall="false" >
- <property name="target" value="${target}"/>
- </ant>
- </else>
- </if>
- </target>
-
- <target name="wms-utils.jobid" unless="lbonly" depends="envset">
- <if>
- <isset property="small.memory"/>
- <then>
- <exec dir="${wms-utils.subsystem.dir}" executable="${antExec}" failonerror="${failonerror}">
- <arg line="jobid -Dtarget=${target} -Dsmall.memory=true -Dbootstrap=${bootstrap} -Dfailonerror=${failonerror} -Ddo.cvs.tag=${do.cvs.tag}"/>
- </exec>
- </then>
- <else>
- <ant dir="${wms-utils.subsystem.dir}"
- target="jobid"
- inheritall="false" >
- <property name="target" value="${target}"/>
- </ant>
- </else>
- </if>
- </target>
-
- <target name="jp.primary" unless="lbonly" depends="envset">
- <if>
- <isset property="small.memory"/>
- <then>
- <exec dir="${jp.subsystem.dir}" executable="${antExec}" failonerror="${failonerror}">
- <arg line="primary -Dtarget=${target} -Dsmall.memory=true -Dbootstrap=${bootstrap} -Dfailonerror=${failonerror} -Ddo.cvs.tag=${do.cvs.tag}"/>
- </exec>
- </then>
- <else>
- <ant dir="${jp.subsystem.dir}"
- target="primary"
- inheritall="false" >
- <property name="target" value="${target}"/>
- </ant>
- </else>
- </if>
- </target>
-
- <if>
- <isset property="setenvonly"/>
- <then>
- <property name="lbonly" value="yes"/>
- </then>
- </if>
-
- <target name="client-interface" unless="setenvonly" depends="envset">
- <if>
- <isset property="small.memory" />
- <then>
- <exec dir="${lb.subsystem.dir}.client-interface" executable="${antExec}" failonerror="${failonerror}">
- <arg line="${target} "-Dsmall.memory=true" "-Dbootstrap=${bootstrap}" "-Dfailonerror=${failonerror}" "-Ddo.cvs.tag=${do.cvs.tag}""/>
- </exec>
- </then>
- <else>
- <ant dir="${lb.subsystem.dir}.client-interface"
- target="${target}"
- inheritall="false" />
- </else>
- </if>
- </target>
-
- <target name="ws-interface" unless="setenvonly" depends="envset">
- <if>
- <isset property="small.memory" />
- <then>
- <exec dir="${lb.subsystem.dir}.ws-interface" executable="${antExec}" failonerror="${failonerror}">
- <arg line="${target} "-Dsmall.memory=true" "-Dbootstrap=${bootstrap}" "-Dfailonerror=${failonerror}" "-Ddo.cvs.tag=${do.cvs.tag}""/>
- </exec>
- </then>
- <else>
- <ant dir="${lb.subsystem.dir}.ws-interface"
- target="${target}"
- inheritall="false" />
- </else>
- </if>
- </target>
-
- <target name="server-bones" unless="setenvonly" depends="envset">
- <if>
- <isset property="small.memory" />
- <then>
- <exec dir="${lb.subsystem.dir}.server-bones" executable="${antExec}" failonerror="${failonerror}">
- <arg line="${target} "-Dsmall.memory=true" "-Dbootstrap=${bootstrap}" "-Dfailonerror=${failonerror}" "-Ddo.cvs.tag=${do.cvs.tag}""/>
- </exec>
- </then>
- <else>
- <ant dir="${lb.subsystem.dir}.server-bones"
- target="${target}"
- inheritall="false" />
- </else>
- </if>
- </target>
-
- <target name="common" unless="setenvonly" depends="envset, globus, expat, c-ares, wms-utils.jobid, security.gsoap-plugin, client-interface">
- <if>
- <isset property="small.memory" />
- <then>
- <exec dir="${lb.subsystem.dir}.common" executable="${antExec}" failonerror="${failonerror}">
- <arg line="${target} "-Dsmall.memory=true" "-Dbootstrap=${bootstrap}" "-Dfailonerror=${failonerror}" "-Ddo.cvs.tag=${do.cvs.tag}""/>
- </exec>
- </then>
- <else>
- <ant dir="${lb.subsystem.dir}.common"
- target="${target}"
- inheritall="false" />
- </else>
- </if>
- </target>
-
- <target name="client" unless="setenvonly" depends="envset, globus, expat, c-ares, client-interface, common">
- <if>
- <isset property="small.memory" />
- <then>
- <exec dir="${lb.subsystem.dir}.client" executable="${antExec}" failonerror="${failonerror}">
- <arg line="${target} "-Dsmall.memory=true" "-Dbootstrap=${bootstrap}" "-Dfailonerror=${failonerror}" "-Ddo.cvs.tag=${do.cvs.tag}""/>
- </exec>
- </then>
- <else>
- <ant dir="${lb.subsystem.dir}.client"
- target="${target}"
- inheritall="false" />
- </else>
- </if>
- </target>
-
- <target name="server" unless="setenvonly" depends="envset, globus, expat, c-ares, mysql, gsoap, security.voms-api-c, jp.primary, client-interface, ws-interface, common, server-bones, logger">
- <if>
- <isset property="small.memory" />
- <then>
- <exec dir="${lb.subsystem.dir}.server" executable="${antExec}" failonerror="${failonerror}">
- <arg line="${target} "-Dsmall.memory=true" "-Dbootstrap=${bootstrap}" "-Dfailonerror=${failonerror}" "-Ddo.cvs.tag=${do.cvs.tag}""/>
- </exec>
- </then>
- <else>
- <ant dir="${lb.subsystem.dir}.server"
- target="${target}"
- inheritall="false" />
- </else>
- </if>
- </target>
-
- <target name="proxy" unless="setenvonly" depends="envset, globus, expat, c-ares, client-interface, common, server, logger">
- <if>
- <isset property="small.memory" />
- <then>
- <exec dir="${lb.subsystem.dir}.proxy" executable="${antExec}" failonerror="${failonerror}">
- <arg line="${target} "-Dsmall.memory=true" "-Dbootstrap=${bootstrap}" "-Dfailonerror=${failonerror}" "-Ddo.cvs.tag=${do.cvs.tag}""/>
- </exec>
- </then>
- <else>
- <ant dir="${lb.subsystem.dir}.proxy"
- target="${target}"
- inheritall="false" />
- </else>
- </if>
- </target>
-
- <target name="logger" unless="setenvonly" depends="envset, globus, expat, c-ares, client-interface, common">
- <if>
- <isset property="small.memory" />
- <then>
- <exec dir="${lb.subsystem.dir}.logger" executable="${antExec}" failonerror="${failonerror}">
- <arg line="${target} "-Dsmall.memory=true" "-Dbootstrap=${bootstrap}" "-Dfailonerror=${failonerror}" "-Ddo.cvs.tag=${do.cvs.tag}""/>
- </exec>
- </then>
- <else>
- <ant dir="${lb.subsystem.dir}.logger"
- target="${target}"
- inheritall="false" />
- </else>
- </if>
- </target>
-
- <target name="utils" unless="setenvonly" depends="envset, globus, expat, c-ares, client, server">
- <if>
- <isset property="small.memory"/>
- <then>
- <exec dir="${lb.subsystem.dir}.utils" executable="${antExec}" failonerror="${failonerror}">
- <arg line="${target} -Dsmall.memory=true -Dbootstrap=${bootstrap} -Dfailonerror=${failonerror} -Ddo.cvs.tag=${do.cvs.tag}"/>
- </exec>
- </then>
- <else>
- <ant dir="${lb.subsystem.dir}.utils"
- target="${target}"
- inheritall="false" />
- </else>
- </if>
- </target>
-
- <!-- Main proxy -->
- <target name="buildmodules" depends="envset,
- utils,
- client-interface,
- ws-interface,
- client,
- server-bones,
- common,
- server,
- proxy,
- logger">
- <echo append="true" file="${global.project.dir}/cruisecontrol-stub.xml">
- <project name="${subsystem.name}" type="post-subsystem" packageName="${global.prefix}-${subsystem.prefix}"/>
- </echo>
- </target>
-
-</project>
+++ /dev/null
-deploy_lb - against glite-deployment-lb_R_2_1_2
-deploy_jp - against HEAD
+++ /dev/null
-Index: org.glite/project/global.dependencies.properties
-===================================================================
-RCS file: /cvs/jra1mw/org.glite/project/global.dependencies.properties,v
-retrieving revision 1.486
-diff -u -r1.486 global.dependencies.properties
---- org.glite/project/global.dependencies.properties 17 Mar 2006 18:19:31 -0000 1.486
-+++ org.glite/project/global.dependencies.properties 30 Mar 2006 11:32:58 -0000
-@@ -1352,6 +1352,25 @@
- ext.globus-sdk.rpm.version = ${ext.vdt.version}
- ext.globus-sdk.rpm.age = 1
-
-+# Globus vdt data server - grid ftp server
-+ext.globus-data-server.name = ${ext.globus.name}
-+ext.globus-data-server.vendor = ${ext.globus.vendor}
-+ext.globus-data-server.version = ${ext.globus.version}
-+ext.globus-data-server.platform = ${platform}
-+ext.globus-data-server.subdir = ${ext.globus-data-server.name}/${ext.globus-data-server.version}/${ext.globus-data-server.platform}
-+ext.globus-data-server.rep.base = ${jra1.rep.base}
-+ext.globus-data-server.rep.file =
-+ext.globus-data-server.rep.subdir =
-+ext.globus-data-server.rep.url = ${ext.globus-data-server.rep.base}/${ext.globus-data-server.subdir}/${ext.globus-data-server.rep.subdir}/${ext.globus-data-server.rep.file}
-+ext.globus-data-server.files =
-+ext.globus-data-server.download = http://www.cs.wisc.edu/vdt/releases/1.2.2/installing-rpms.html
-+ext.globus-data-server.homepage = http://www.cs.wisc.edu/vdt//index.html
-+ext.globus-data-server.description = The Globus Toolkit(R). This is the version packaged by VDT.
-+ext.globus-data-server.rpm.name = vdt_globus_data_server
-+ext.globus-data-server.rpm.version = ${ext.vdt.version}
-+ext.globus-data-server.rpm.age = 1
-+
-+
- # GPT
- ext.gpt.name = gpt
- ext.gpt.vendor = gpt
-Index: org.glite.deployment/build.xml
-===================================================================
-RCS file: /cvs/jra1mw/org.glite.deployment/build.xml,v
-retrieving revision 1.62
-diff -u -r1.62 build.xml
---- org.glite.deployment/build.xml 22 Aug 2005 17:00:02 -0000 1.62
-+++ org.glite.deployment/build.xml 30 Mar 2006 11:32:58 -0000
-@@ -814,6 +814,39 @@
- </if>
- </target>
-
-+ <target name="jpps" unless="setenvonly" depends="envset,config">
-+ <if>
-+ <isset property="small.memory"/>
-+ <then>
-+ <exec dir="${deployment.subsystem.dir}.jpps" executable="${antExec}" failonerror="${failonerror}">
-+ <arg line="${target} "-Dsmall.memory=true" "-Dbootstrap=${bootstrap}" "-Dfailonerror=${failonerror}" "-Ddo.cvs.tag=${do.cvs.tag}" "-Dbuild.name=${build.name}""/>
-+ </exec>
-+ </then>
-+ <else>
-+ <ant dir="${deployment.subsystem.dir}.jpps"
-+ target="${target}"
-+ inheritall="false" />
-+ </else>
-+ </if>
-+ </target>
-+
-+
-+ <target name="jpis" unless="setenvonly" depends="envset,config">
-+ <if>
-+ <isset property="small.memory"/>
-+ <then>
-+ <exec dir="${deployment.subsystem.dir}.jpis" executable="${antExec}" failonerror="${failonerror}">
-+ <arg line="${target} "-Dsmall.memory=true" "-Dbootstrap=${bootstrap}" "-Dfailonerror=${failonerror}" "-Ddo.cvs.tag=${do.cvs.tag}" "-Dbuild.name=${build.name}""/>
-+ </exec>
-+ </then>
-+ <else>
-+ <ant dir="${deployment.subsystem.dir}.jpis"
-+ target="${target}"
-+ inheritall="false" />
-+ </else>
-+ </if>
-+ </target>
-+
- <target name="io-server" unless="setenvonly" depends="envset,config">
- <if>
- <isset property="small.memory"/>
-@@ -891,6 +924,8 @@
- wn,
- wms,
- lb,
-+ jpps,
-+ jpis,
- io-server,
- io-client,
- ce,
-Index: org.glite.deployment/project/dependencies.properties
-===================================================================
-RCS file: /cvs/jra1mw/org.glite.deployment/project/dependencies.properties,v
-retrieving revision 1.528
-diff -u -r1.528 dependencies.properties
---- org.glite.deployment/project/dependencies.properties 22 Aug 2005 17:00:02 -0000 1.528
-+++ org.glite.deployment/project/dependencies.properties 30 Mar 2006 11:32:58 -0000
-@@ -38,6 +38,8 @@
- org.glite.deployment.wn.version = HEAD
- org.glite.deployment.wms.version = HEAD
- org.glite.deployment.lb.version = HEAD
-+org.glite.deployment.jpps.version = HEAD
-+org.glite.deployment.jpis.version = HEAD
- org.glite.deployment.io-server.version = HEAD
- org.glite.deployment.io-client.version = HEAD
- org.glite.deployment.ce.version = HEAD
-Index: org.glite.deployment/project/glite.deployment.csf.xml
-===================================================================
-RCS file: /cvs/jra1mw/org.glite.deployment/project/glite.deployment.csf.xml,v
-retrieving revision 1.45
-diff -u -r1.45 glite.deployment.csf.xml
---- org.glite.deployment/project/glite.deployment.csf.xml 22 Aug 2005 17:00:02 -0000 1.45
-+++ org.glite.deployment/project/glite.deployment.csf.xml 30 Mar 2006 11:32:58 -0000
-@@ -389,6 +389,14 @@
- <equals arg1="${org.glite.deployment.lb.version}" arg2="HEAD" />
- </condition>
-
-+ <condition property="jpps.head">
-+ <equals arg1="${org.glite.deployment.jpps.version}" arg2="HEAD" />
-+ </condition>
-+
-+ <condition property="jpis.head">
-+ <equals arg1="${org.glite.deployment.jpis.version}" arg2="HEAD" />
-+ </condition>
-+
- <condition property="io-server.head">
- <equals arg1="${org.glite.deployment.io-server.version}" arg2="HEAD" />
- </condition>
-@@ -832,6 +840,28 @@
- tag="${org.glite.deployment.lb.version}" />
- </target>
-
-+ <!-- jpps component -->
-+ <target name="jpps" depends="get.jpps.head, get.jpps.tag"/>
-+ <target name="get.jpps.head" if="jpps.head">
-+ <cvs-co package="org.glite.deployment.jpps" />
-+ </target>
-+
-+ <target name="get.jpps.tag" unless="jpps.head">
-+ <cvs-co package="org.glite.deployment.jpps"
-+ tag="${org.glite.deployment.jpps.version}" />
-+ </target>
-+
-+ <!-- jpis component -->
-+ <target name="jpis" depends="get.jpis.head, get.jpis.tag"/>
-+ <target name="get.jpis.head" if="jpis.head">
-+ <cvs-co package="org.glite.deployment.jpis" />
-+ </target>
-+
-+ <target name="get.jpis.tag" unless="jpis.head">
-+ <cvs-co package="org.glite.deployment.jpis"
-+ tag="${org.glite.deployment.jpis.version}" />
-+ </target>
-+
- <!-- io-server component -->
- <target name="io-server" depends="get.io-server.head, get.io-server.tag"/>
- <target name="get.io-server.head" if="io-server.head">
+++ /dev/null
-Index: org.glite.deployment.lb/config/scripts/glite-lb-config.py
-===================================================================
-RCS file: /cvs/jra1mw/org.glite.deployment.lb/config/scripts/glite-lb-config.py,v
-retrieving revision 1.66
-diff -u -r1.66 glite-lb-config.py
---- org.glite.deployment.lb/config/scripts/glite-lb-config.py 13 Mar 2006 15:22:31 -0000 1.66
-+++ org.glite.deployment.lb/config/scripts/glite-lb-config.py 15 Mar 2006 14:51:40 -0000
-@@ -120,7 +120,12 @@
-
- if not os.path.exists('/tmp/mysql.sock'):
- os.symlink('/var/lib/mysql/mysql.sock', '/tmp/mysql.sock')
--
-+
-+
-+ #-------------------------------------------------------------------
-+ # start bkserver
-+ #-------------------------------------------------------------------
-+
- pid = glib.getPID('bkserverd')
- if pid != 0:
- print 'The gLite LB Server service is already running. Restarting...'
-@@ -133,7 +138,7 @@
- pid = glib.getPID('bkserverd')
-
- if (pid != 0):
-- print "The gLite LB Server service has been started ",
-+ print "The gLite LB Server service has been started ",
- glib.printOkMessage()
- else:
- glib.printErrorMessage("Could not start the gLite LB Server service")
-@@ -142,6 +147,36 @@
- return 1
-
- #-------------------------------------------------------------------
-+ # start jp-importer, if enabled
-+ #-------------------------------------------------------------------
-+
-+ lb_export = 0
-+ if params.has_key('lb.export.enabled'):
-+ if params['lb.export.enabled'] == "true":
-+ lb_export = 1
-+
-+ if lb_export:
-+ pid = glib.getPID('jp-importer')
-+ if pid != 0:
-+ print 'The gLite JP Importer service is already running. Restarting...'
-+ os.system('%s/etc/init.d/glite-jp-importer stop' % os.environ['GLITE_LOCATION'])
-+ else:
-+ print 'Starting the gLite JP Importer service...'
-+
-+ os.system('%s/etc/init.d/glite-jp-importer start' % os.environ['GLITE_LOCATION'])
-+
-+ pid = glib.getPID('jp-importer')
-+
-+ if (pid != 0):
-+ print "The gLite JP Importer service has been started ",
-+ glib.printOkMessage()
-+ else:
-+ glib.printErrorMessage("Could not start the gLite JP Importer service")
-+ glib.printErrorMessage("Please verify and re-run the script "),
-+ glib.printFailedMessage()
-+ return 1
-+
-+ #-------------------------------------------------------------------
- # Start Servicetool
- #-------------------------------------------------------------------
-
-@@ -161,6 +196,10 @@
- if (pid != 0):
- os.system('%s/etc/init.d/glite-lb-bkserverd stop' % os.environ['GLITE_LOCATION'])
-
-+ #-------------------------------------------------------------------
-+ # Book Keeping Server
-+ #-------------------------------------------------------------------
-+
- pid = glib.getPID('bkserverd')
- if (pid != 0):
- print 'Could not stop the LB Server service ',
-@@ -171,6 +210,25 @@
- glib.printOkMessage()
-
- #-------------------------------------------------------------------
-+ # JP Importer
-+ #-------------------------------------------------------------------
-+
-+ pid = glib.getPID('jp-importer')
-+ if (pid != 0):
-+ os.system('%s/etc/init.d/glite-jp-importer stop' % os.environ['GLITE_LOCATION'])
-+
-+ pid = glib.getPID('jp-importer')
-+ if (pid != 0):
-+ print 'Could not stop the JP Importer service ',
-+ glib.printFailedMessage()
-+ error_level = 1
-+ else:
-+ if params.has_key('lb.export.enabled'):
-+ if params['lb.export.enabled'] == "true":
-+ print 'The JP Importer service has been stopped ',
-+ glib.printOkMessage()
-+
-+ #-------------------------------------------------------------------
- # MySQL
- #-------------------------------------------------------------------
-
-@@ -195,6 +253,10 @@
- if retval != 0:
- error_level = 1
-
-+ retval = os.system('%s/etc/init.d/glite-jp-importer status' % os.environ['GLITE_LOCATION'])
-+ if retval != 0:
-+ error_level = 1
-+
- #-------------------------------------------------------------------
- # Servicetool
- #-------------------------------------------------------------------
-@@ -325,7 +387,27 @@
- print "\n==> MySQL database %s already exist\n" % params['lb.database.name']
-
- self.mysql.stop()
--
-+
-+ # ------------------------------------------------------------------
-+ # export from bkserver to cron
-+ # ------------------------------------------------------------------
-+ if params['GLITE_LB_EXPORT_ENABLED'] == "true":
-+ file = open('%s/etc/glite-lb-export-cron-wrapper.sh' % os.environ['GLITE_LOCATION'], 'w')
-+ file.write('#! /bin/sh\n')
-+ file.write('. %s\n' % glib.getInstallerExportFile())
-+ file.write('$GLITE_LOCATION/examples/glite-lb-export.sh\n')
-+ file.close()
-+ os.system('/bin/chmod 0755 %s/etc/glite-lb-export-cron-wrapper.sh' % os.environ['GLITE_LOCATION'])
-+
-+ file = open('/etc/cron.d/glite-lb-export.cron', 'w')
-+ file.write('# periodically run purge and export jobs from bkserver\n')
-+ file.write('00,30 * * * * %s %s/etc/glite-lb-export-cron-wrapper.sh\n' % (params['glite.user.name'], os.environ['GLITE_LOCATION']))
-+ file.close()
-+ os.system('/bin/chmod 0755 /etc/cron.d/glite-lb-export.cron')
-+
-+ # Touch cron spool directory to cause reloading of the crontabs
-+ os.system("/bin/touch /var/spool/cron")
-+
- #-------------------------------------------------------------------
- # RGMA servicetool: configure servicetool
- #-------------------------------------------------------------------
-@@ -411,6 +493,17 @@
- # Perl
- glib.addEnvPath("PERL5LIB", "%s/lib/perl:%s/lib/perl5" % (os.environ['GPT_LOCATION'],os.environ['GLITE_LOCATION']))
-
-+ # LB export
-+ glib.export('GLITE_LB_EXPORT_ENABLED', params['lb.export.enabled']);
-+ glib.export('GLITE_LB_EXPORT_BKSERVER', params['lb.export.bkserver']);
-+ glib.export('GLITE_LB_EXPORT_JPPS', params['lb.export.jpps']);
-+ glib.export('GLITE_LB_EXPORT_JPREG_MAILDIR', params['lb.export.jpreg']);
-+ glib.export('GLITE_LB_EXPORT_JPDUMP_MAILDIR', params['lb.export.jpdump']);
-+ glib.export('GLITE_LB_EXPORT_DUMPDIR', params['lb.export.dump']);
-+ glib.export('GLITE_LB_EXPORT_DUMPDIR_OLD', params['lb.export.dump.old']);
-+ glib.export('GLITE_LB_EXPORT_EXPORTDIR', params['lb.export.export']);
-+ glib.export('GLITE_LB_EXPORT_PURGE_ARGS', '"%s"' % params['lb.export.purgeargs']);
-+
- # Set environment
- glib.setUserEnv()
-
-Index: org.glite.deployment.lb/config/templates/glite-lb.cfg.xml
-===================================================================
-RCS file: /cvs/jra1mw/org.glite.deployment.lb/config/templates/glite-lb.cfg.xml,v
-retrieving revision 1.21
-diff -u -r1.21 glite-lb.cfg.xml
---- org.glite.deployment.lb/config/templates/glite-lb.cfg.xml 13 Mar 2006 15:19:21 -0000 1.21
-+++ org.glite.deployment.lb/config/templates/glite-lb.cfg.xml 15 Mar 2006 14:51:40 -0000
-@@ -17,9 +17,16 @@
- parameter. Leave it empty of comment it out to use the same as 'glite.user.name'"
- value="changeme"/>
-
-- <mysql.root.password
-- description="The mysql root password"
-- value="changeme"/>
-+ <mysql.root.password
-+ description="The mysql root password"
-+ value="changeme"/>
-+
-+ <lb.export.jpps
-+ description="Job Provenance Primary Storage.
-+ [Example: localhost:8901][Type: string]"
-+ value="changeme"/>
-+
-+
-
- <!-- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -->
- <!-- Advanced parameters - Change them if you know what you're doing -->
-@@ -55,6 +62,12 @@
- [Example: 17M][Type: Integer][Unit: MB]"
- value="17M"/>
-
-+ <lb.export.enabled
-+ description="Enables exports to Job Provenance.
-+ [Example: true][Type: boolean]"
-+ value="true"/>
-+
-+
- <!-- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -->
- <!-- System parameters - You should leave these alone -->
- <!-- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -->
-@@ -67,7 +80,42 @@
- <value>destination</value>
-
- </lb.index.list>
--
-+
-+ <lb.export.bkserver
-+ description="Book Keeping Server service when differs from default port on localhost.
-+ [Example: localhost:9000][Type: string]"
-+ value=""/>
-+
-+ <lb.export.jpreg
-+ description="Maildir for job registrations.
-+ [Example: /tmp/jpreg][Type: string]"
-+ value=""/>
-+
-+ <lb.export.jpdump
-+ description="Maildir for job dumps.
-+ [Example: /tmp/jpdump][Type: string]"
-+ value=""/>
-+
-+ <lb.export.dump
-+ description="Dump directory for purged jobs.
-+ [Example: /tmp/dump][Type: string]"
-+ value=""/>
-+
-+ <lb.export.dump.old
-+ description="Dump directory for handled purged jobs.
-+ [Example: /tmp/dump.old][Type: string]"
-+ value=""/>
-+
-+ <lb.export.export
-+ description="Exported events divided to file per job.
-+ [Example: /tmp/lb_export][Type: string]"
-+ value=""/>
-+
-+ <lb.export.purgeargs
-+ description="Purge arguments (with timeouts).
-+ [Example: -a 1h -c 1h -n 1h -o 1d][Type: string]"
-+ value="-a 1d -c 1d -n 1d -o 7d"/>
-+
- </parameters>
-
- </config>
-Index: org.glite.deployment.lb/project/glite-lb.sdf.xml.template
-===================================================================
-RCS file: /cvs/jra1mw/org.glite.deployment.lb/project/glite-lb.sdf.xml.template,v
-retrieving revision 1.38
-diff -u -r1.38 glite-lb.sdf.xml.template
---- org.glite.deployment.lb/project/glite-lb.sdf.xml.template 13 Mar 2006 15:19:21 -0000 1.38
-+++ org.glite.deployment.lb/project/glite-lb.sdf.xml.template 15 Mar 2006 14:51:40 -0000
-@@ -57,6 +57,12 @@
- build="@org.glite.lb.server-bones.info.build@"
- arch="i386"/>
-
-+ <component name="glite-lb-client"
-+ version="@org.glite.lb.client.info.version@"
-+ age="@org.glite.lb.client.info.age@"
-+ build="@org.glite.lb.client.info.build@"
-+ arch="i386"/>
-+
- <component name="glite-wms-utils-jobid"
- version="@org.glite.wms-utils.jobid.info.version@"
- age="@org.glite.wms-utils.jobid.info.age@"
-@@ -105,6 +111,12 @@
- build="@org.glite.jp.ws-interface.info.build@"
- arch="i386"/>
-
-+ <component name="glite-jp-client"
-+ version="@org.glite.jp.client.info.version@"
-+ age="@org.glite.jp.client.info.age@"
-+ build="@org.glite.jp.client.info.build@"
-+ arch="i386"/>
-+
- <component name="gridsite"
- version="@org.gridsite.core.info.version@"
- age="@org.gridsite.core.info.age@"
+++ /dev/null
-These are the instructions on how to build LB for VDT:
-
-$ mkdir lb4vdt
-$ cd lb4vdt
-$ export CVSROOT=:pserver:anonymous@jra1mw.cvs.cern.ch:/cvs/jra1mw
-$ cvs co org.glite.lb
-
-you may need to edit ./org.glite.lb/lb4vdt/Makefile.inc
-to specify some paths (some of them should be set automatically,
-e.g. by VDT_LOCATION)
-
-and then (still from the top directory) run
-
-$ ./org.glite.lb/lb4vdt/LB_install.sh 2>&1 | tee log
-
-:)
+++ /dev/null
-%
-% Official text received on October 6, 2004
-%
-\vfill{\bf Copyright }\copyright{\bf Members of the EGEE Collaboration. 2004.
-See http://eu-egee.org/partners for details on the copyright holders.
-
-EGEE (``Enabling Grids for E-science in Europe'') is a project funded by
-the European Union. For more information on the project, its partners
-and contributors please see http://www.eu-egee.org.
-
-You are permitted to copy and distribute verbatim copies of this
-document containing this copyright notice, but modifying this document
-is not allowed. You are permitted to copy this document in whole or in
-part into other documents if you attach the following reference to the
-copied elements: ``Copyright }\copyright{\bf 2004. Members of the EGEE
-Collaboration. http://www.eu-egee.org''
-
-The information contained in this document represents the views of
-EGEE as of the date they are published. EGEE does not guarantee that
-any information contained herein is error-free, or up to date.
-
-EGEE MAKES NO WARRANTIES, EXPRESS, IMPLIED, OR STATUTORY, BY
-PUBLISHING THIS DOCUMENT.}
-
+++ /dev/null
-\begin{center}
-{\bf Delivery Slip}
-\end{center}
-\begin{tabularx}{\textwidth}{|l|l|l|X|X|}
-\hline
- & {\bf Name} & {\bf Partner} & {\bf Date} & {\bf Signature} \\
-\hline
-{\bf From} & Ale\v s K\v{r}enek et al.& CESNET & May 28, 2005 & \\
-\hline
-{\bf Reviewed by} & & & & \\
-
-\hline
-{\bf Approved by} & & & & \\
-\hline
-\end{tabularx}
-
-\begin{center}
-{\bf Document Change Log}
-\end{center}
-
-\begin{tabularx}{\textwidth}{|l|l|X|X|}
-\hline
-{\bf Issue } & {\bf Date } & {\bf Comment } & {\bf Author } \\ \hline
-Initial version & Feb 28, 2005 & & A. K\v{r}enek \\ \hline
-Reviewer's comments & Mar 25, 2005 & comments reflected & A. K\v{r}enek \\
-\hline
-\end{tabularx}
-
-\begin{center}
-{\bf Document Change Record}
-\end{center}
-
-\begin{tabularx}{\textwidth}{|l|l|X|}
-\hline
-{\bf Issue } & {\bf Item } & {\bf Reason for Change } \\ \hline
-
-
-\hline
-\end{tabularx}
-
-\input{copyright}
+++ /dev/null
-\documentclass{egee}
-\usepackage{comment}
-
-\def\LB{L\&B}
-
-\title{\LB\ Test Plan}
-\author{CESNET EGEE JRA1 team}
-\DocIdentifier{EGEE-JRA1-??}
-\Date{\today}
-\Activity{JRA1: Middleware Engineering and Integration}
-\DocStatus{DRAFT}
-\Dissemination{PUBLIC}
-\DocumentLink{}
-
-\def\req{\noindent\textbf{Prerequisites:}}
-\def\how{\noindent\textbf{How to run:}}
-\def\result{\noindent\textbf{Expected result:}}
-
-\def\path#1{{\normalfont\textsf{#1}}}
-\def\code#1{\texttt{#1}}
-
-\def\todo#1{\textbf{TODO:} #1}
-
-
-\specialcomment{hints}{\par\noindent\textbf{Hints: }\begingroup\slshape}{\endgroup}
-%\includecomment{hints}
-
-\begin{document}
-
-\input{frontmatter}
-\newpage
-\tableofcontents
-\newpage
-
-\section{Rationale}
-\todo{}
-
-\section{Test Coverage}
-\todo{}
-
-\section{Test Cases}
-
-\subsection{Event delivery}
-
-% locallogger
-% without the other daemons: register a job, the call returns EAGAIN, event files appear
-\subsubsection{Standalone locallogger -- job registration}
-\label{reg}
-\req\ running \path{glite-lb-logd} on the test node, don't start either
-\path{glite-lb-interlogd} or \path{glite-lb-bkserverd}
-
-\how\ call \code{edg\_wll\_RegisterJob}. Jobids should preferably point
-to a~remote \LB\ server.
-
-\result\ The API call returns EAGAIN, but the locallogger creates an event file
-in its storage.
-The file should contain a single-line RegJob event.
-
-\begin{hints}
-\path{glite-lb-regjob} example can be used. It generates a~unique jobid,
-prints it and calls \LB\ API appropriately.
-\end{hints}
-
-% async -- appends to the file, OK
-% logevent
-\subsubsection{Standalone locallogger -- log event}
-\label{log}
-\req\ running \path{glite-lb-logd} only, files generated in test~\ref{reg}.
-
-\how\ call \code{edg\_wll\_Log*} for various event types in a~sequence
-resembling real \LB\ usage, using the same jobids as in test~\ref{reg}.
-
-\result\ The API calls return 0; events are added one per line to the locallogger files.
-
-\begin{hints}
-\path{glite-lb-logev} client program can be used.
-
-\path{glite-lb-*.sh} examples may be adapted to produce reasonable sequences
-of events.
-\end{hints}
-
-\subsubsection{Interlogger recovery}
-\label{recover}
-% interlogger recovery
-% il & server (remote)
-% start it, it pushes the files through to the server, the files disappear, the job states can be queried
-\req\ running \path{glite-lb-bkserverd} on the machine and port where
-jobids from \ref{reg} point to; files generated in~\ref{log}.
-
-\how\ Make a~copy of the files created in~\ref{log}, then start
-\path{glite-lb-interlogd}. After approx. 10s check the jobs
-with \code{edg\_wll\_JobLog} call.
-
-\result \code{edg\_wll\_JobLog} should return the same events that were
-contained in the locallogger files. The files should be removed by
-interlogger after approx. 1 min.
-
-\begin{hints}
-\path{glite-lb-joblog} example outputs the events in (almost) the same
-format as the locallogger files.
-\end{hints}
-
-% event delivery
-% send the .sh event sequences, job log returns what was in the files
-\subsubsection{Normal event delivery}
-\label{normal}
-\req\ all \LB\ daemons running (\path{glite-lb-logd}, \path{glite-lb-interlogd},
-\path{glite-lb-bkserverd})
-
-\how\
-\begin{enumerate}
-
-\item Register jobs with \code{edg\_wll\_RegisterJob}
-\item Log reasonable sequences of events with \code{edg\_wll\_Log*}.
-\item Check with \code{edg\_wll\_JobLog}
-that the events got delivered afterwards (approx. 10s).
-\end{enumerate}
-
-\result\ API calls should return 0. The same events that were logged must be returned.
-
-\begin{hints}
-\path{glite-lb-*.sh} scripts produce reasonable sequences of events, including
-the job initial registration.
-
-There is approx. 1min time window in which the locallogger files exist.
-They can be grabbed and used for comparing the events as in~\ref{recover}.
-
-\end{hints}
-
-\subsection{Job state computation}
-
-% normal event delivery & job state machine
-% .sh, query the job state
-\subsubsection{Normal job states}
-\label{state}
-\req\ \path{glite-lb-bkserverd} running, events from \ref{normal} logged.
-
-\how\ Check state of the jobs with \code{edg\_wll\_JobStatus}.
-
-\result\ The API call should return 0 and the jobs should be in the expected
-states. Thorough tests may also cross-check the values supplied in the
-events (e.g. destination computing element) against the values reported in the job states.
-
-\begin{hints}
-\path{glite-lb-*.sh} scripts produce sequences of events resulting
-in the job state matching the `*' part of the script name.
-\end{hints}
-
-\subsubsection{DAG job states}
-\todo{}
-% special DAG state, histogram of children
-
-\subsection{LB proxy}
-\req\ running \path{glite-lb-proxy}, \path{glite-lb-interlogd} and
-\path{glite-lb-bkserverd}
-
-\how\ Register jobs with \code{edg\_wll\_RegisterJobProxy}, log events
-using \code{edg\_wll\_LogEventProxy} and check the job states against
-both lbproxy (using \code{edg\_wll\_JobStatusProxy}) and bkserver
-(using \code{edg\_wll\_JobStatus}).
-
-\result\ A new job state should be available immediately at the
-lbproxy and probably with a small delay also at the bkserver.
-
-\begin{hints}
-There is already a script \path{test.sh} in
-\path{org.glite.lb.proxy/examples} that can be used together with
-the above-mentioned \path{glite-lb-*.sh} scripts (they are called from
-\path{test.sh}) to test all this.
-\end{hints}
-
-\subsection{WS interface}
-\req\ \path{glite-lb-bkserverd} running, events from \ref{normal} logged
-
-\how\ retrieve both events and job states with the \LB\ WS interface
-(operations \code{JobStatus}, \code{QueryEvents}).
-
-\result\ The returned data should match those returned by the legacy
-API calls.
-
-\begin{hints}
-Examples \path{org.glite.lb.server/examples/ws\_*.c} convert the WS
-responses back to the legacy \LB\ data structures and print them in
-the same form as e.g. \path{glite-lb-jobstat}.
-\end{hints}
-
-\subsection{Notifications}
-
-% notifications
-% regjob, register a notification for everything, send events, watch for notifications
-\subsubsection{Single job, any state change}
-\label{notif1}
-\req\ All \LB\ services running
-
-\how
-\begin{enumerate}
-\item Register a job.
-\item Start a~notification client (preferably on another machine),
-register with \code{edg\_wll\_NotifNew} for any state changes of the job,
-and repeatedly invoke \code{edg\_wll\_NotifReceive}.
-\item One by one send events triggering job state change.
-\end{enumerate}
-
-\result\ All the events should trigger notification reported by the running
-notification client.
-
-\begin{hints}
-\path{glite-lb-notify} example can be used with its \path{test} command.
-\end{hints}
-
-\subsubsection{Additional notification criteria}
-\label{notif-complex}
-\req\ All \LB\ services running
-
-\how\ Like~\ref{notif1} but include additional criteria,
-e.g. job is scheduled for a~specific destination.
-
-\result\ Only notifications matching the criteria should be delivered.
-
-% extend the query with another job
-\subsubsection{Include another job}
-\label{notif2}
-\req\ All \LB\ services running, notification from \ref{notif1} still active
-
-\how\
-\begin{enumerate}
-\item Register another job.
-\item Augment the notification registration with the new jobid using
-\code{edg\_wll\_NotifChange}.
-\item Start notification client, bind to the registration with
-\code{edg\_wll\_NotifBind}.
-\item Send events for the new job.
-\end{enumerate}
-
-\result\ Notifications should be received by the client.
-
-\begin{hints}
-Commands \path{change} and \path{receive} of \path{glite-lb-notify}
-can be used.
-\end{hints}
-
-% notification -- change of address/port
-% then send an event, it must arrive
-% already covered implicitly by the previous test
-
-\subsubsection{Delayed delivery}
-% notification -- delayed delivery
-% register, disconnect, send events, reconnect
-
-\req\ All \LB\ services running
-
-\how\
-\begin{enumerate}
-\item Register another job.
-\item Register a~notification as in~\ref{notif1} but terminate the client
-immediately.
-\item Log events for the job.
-\item Restart the client, binding to the notification and call
-\code{edg\_wll\_NotifReceive} repeatedly.
-\end{enumerate}
-
-\result\ Delayed notifications should be received by the client almost
-immediately.
-
-\subsection{Server purge}
-
-\textbf{WARNING: This test is destructive; it destroys ALL data in an
-existing \LB\ database.}
-
-The test is fairly complex but it does not make too much sense to split it
-artificially.
-
-\req\ All \LB\ services running, preferably a~dedicated server for this test.
-
-\how
-\begin{enumerate}
-\item Purge all data on the server with \path{glite-lb-purge}
-\item Log two sets of jobs, separated by a delay of at least 60s so
-that the sets can be distinguished from each other.
-\item \label{purgel}
-Using \code{edg\_wll\_JobLog} retrieve events of all the jobs
-\item \label{purge1}
-Purge the first set of jobs (by specifying an appropriate timestamp),
-letting the server dump the purged events.
-\item \label{purge2} Purge the other set of jobs, also dumping the events.
-\item \label{purge3} Run purge once more.
-\end{enumerate}
-
-\result\ The data dumped in steps \ref{purge1}, \ref{purge2} should be the
-same as retrieved in~\ref{purgel}. The final purge invocation should
-do nothing (i.e. nothing was left in the database).
-
-% test_purge
-\begin{hints}
-The example \path{glite-lb-test\_purge} does exactly this sequence of steps,
-including the checks.
-\end{hints}
-
-
-\end{document}
+++ /dev/null
-#!/bin/sh
-
-set -e
-
-#OFFLINE=true
-TOPDIR=${PWD}
-
-export LB4VDTDIR=${TOPDIR}/org.glite.lb/lb4vdt
-export STAGEDIR=${TOPDIR}/stage
-
-if [ ! -f ${LB4VDTDIR}/Makefile.inc ]; then
- echo "Error: There is no ${LB4VDTDIR}/Makefile.inc. Exiting."
- exit 1
-fi
-
-if [ -z "${CVSROOT}" ]; then
- echo "XXX"
- export CVSROOT=:pserver:anonymous@jra1mw.cvs.cern.ch:/cvs/jra1mw
-# export CVSROOT=:ext:jpospi@jra1mw.cvs.cern.ch:/cvs/jra1mw
-fi
-
-dep_modules="org.glite.wms-utils.jobid
-org.gridsite.core"
-
-modules="org.glite.security.gsoap-plugin
-org.glite.lb.client-interface
-org.glite.lb.common
-org.glite.lb.client
-org.glite.lb.logger
-org.glite.lb.ws-interface
-org.glite.lb.server-bones
-org.glite.lb.server
-org.glite.lb.proxy"
-#org.glite.lb.utils
-
-for i in $dep_modules;
-do
- echo "*********************************************************"
- echo "* Module $i"
- echo "*********************************************************"
- cd ${TOPDIR}
- if [ -n "${OFFLINE}" ]; then
- echo "Working offline"
- else
- echo "Getting sources from CVS"
- cvs co -A $i;
- fi
- if [ -d $i -a -f ${LB4VDTDIR}/patches/$i.patch -a ! -f .$i.patched ]; then
- echo "Patching $i"
- patch -p0 < ${LB4VDTDIR}/patches/$i.patch
- touch .$i.patched
- fi
- if [ -d $i ]; then
- if [ -f ${LB4VDTDIR}/scripts/$i.build ]; then
- echo "Building"
- sh -x ${LB4VDTDIR}/scripts/$i.build
- fi
- else
- echo "WARNING: directory $i not found"
- fi
-done
-
-for i in $modules;
-do
- echo "*********************************************************"
- echo "* Module $i"
- echo "*********************************************************"
- cd ${TOPDIR}
- if [ -n "${OFFLINE}" ]; then
- echo "Working offline"
- else
- echo "Getting sources from CVS"
- cvs co -A $i;
- fi
- if [ -d $i -a -f ${LB4VDTDIR}/patches/$i.patch -a ! -f .$i.patched ]; then
- echo "Patching $i"
- patch -p0 < ${LB4VDTDIR}/patches/$i.patch
- touch .$i.patched
- fi
- if [ -d $i ]; then
- echo "Entering directory ${TOPDIR}/$i"
- cd ${TOPDIR}/$i
- echo "Copying supporting files"
- cp -rv ${TOPDIR}/org.glite.lb/project/{at3,*.T,*.pm} ./project/
- mkdir -p build
- echo "Entering directory ${TOPDIR}/$i/build"
- cd build
- ln -fsv ../Makefile
-# ln -fsv ../../Makefile.inc Makefile.inc
- ln -fsv ${LB4VDTDIR}/Makefile.inc
- echo "Building"
- make LB_STANDALONE=yes
- make stage LB_STANDALONE=yes
- else
- echo "WARNING: directory $i not found"
- fi
- echo "Done"
-done
-
-cd ${TOPDIR}
+++ /dev/null
-# external dependencies:
-
-# missing packages could be downloaded to repository from
-# http://egee-jra1-integration.web.cern.ch/egee-jra1-integration/repository/
-#repository=/home/honik/egee/repository
-
-#vdt_location=/home/honik/egee/vdt-1.3.11
-vdt_location=${VDT_LOCATION}
-
-
-#cares_prefix=${repository}/c-ares/1.3.0/rhel30_gcc32
-cares_prefix=/software/cares-1.3
-
-#classads_prefix=${repository}/classads/0.9.6/rhel30_gcc32
-classads_prefix=${vdt_location}/classads
-
-# not needed (used "only" for unit tests - 'make check'):
-#cppunit_prefix=${repository}/cppunit/1.10.2/rhel30_gcc32
-cppunit_prefix=/software/cppunit-1.10.2
-
-#expat_prefix=/usr
-expat_prefix=${vdt_location}/expat
-
-#globus_prefix=${repository}/globus/2.4.3-VDT-1.2.2/rhel30_gcc32
-globus_prefix=${vdt_location}/globus
-
-#gridsite_prefix=${stagedir}
-
-gsoap_default_version=2.7.6b
-gsoap_versions=${gsoap_default_version}
-#gsoap_name=gsoap
-#gsoap_platform=rhel30_gcc32
-#gsoap_prefix=${repository}/${gsoap_name}/${gsoap_default_version}/${gsoap_platform}
-gsoap_prefix=/software/gsoap-${gsoap_default_version}
-
-#mysql_prefix=${repository}/mysql/4.1.11/rhel30_gcc32
-mysql_prefix=${vdt_location}/mysql
-mysql_version=4.1.11
-
-voms_prefix=${vdt_location}/glite
-#voms_prefix=/home/honik/egee/glite/stage
-
-
-# some defaults:
-
-#PREFIX=${vdt_location}/glite
-PREFIX=/tmp/lb4vdt
-globalprefix=glite
-lbprefix=lb
-
-builddir=build
-distdir=${STAGEDIR}/../dist
-stagedir=${STAGEDIR}
-top_srcdir=..
-
-thrflavour=gcc32dbgpthr
-nothrflavour=gcc32dbg
-
-# needed by org.glite.lb.client:
-glite_location=${stagedir}
-
+++ /dev/null
-Index: org.gridsite.core/src/Makefile
-===================================================================
-RCS file: /cvs/jra1mw/org.gridsite.core/src/Makefile,v
-retrieving revision 1.48
-diff -u -r1.48 Makefile
---- org.gridsite.core/src/Makefile 18 Apr 2006 10:59:01 -0000 1.48
-+++ org.gridsite.core/src/Makefile 21 Apr 2006 14:14:53 -0000
-@@ -56,9 +56,9 @@
- # Build
- #
-
--build: apidoc \
-- libgridsite.so.$(VERSION) libgridsite.a htcp mod_gridsite.so \
-- urlencode findproxyfile real-gridsite-admin.cgi gsexec \
-+build: \
-+ libgridsite.so.$(VERSION) libgridsite.a htcp \
-+ urlencode findproxyfile real-gridsite-admin.cgi \
- gridsite-copy.cgi
-
- build: libgridsite_globus.so.$(VERSION) libgridsite_globus.a
-@@ -226,7 +226,7 @@
- # Install
- #
-
--install: apidoc
-+install:
- mkdir -p $(prefix)/include \
- $(prefix)/lib \
- $(prefix)/bin \
-@@ -273,8 +273,6 @@
- ln -sf htcp $(prefix)/bin/htmv
- ln -sf htcp $(prefix)/bin/htping
- ln -sf htcp $(prefix)/bin/htfind
-- cp -f gsexec $(prefix)/sbin
-- cp -f mod_gridsite.so $(prefix)/lib/httpd/modules
-
- #
- # Distributions
+++ /dev/null
-#!/bin/sh
-
-set -e
-
-TOPDIR=${PWD}
-source ${LB4VDTDIR}/Makefile.inc
-cd org.gridsite.core/src
-make build OPENSSL_FLAGS=-I${globus_prefix}/include/gcc32 OPENSSL_LIBS=-L${globus_prefix}/lib FLAVOR_EXT=_gcc32
-make install prefix=${STAGEDIR}
-cd ${TOPDIR}
-
+++ /dev/null
-package MultiStruct;
-
-use StructField;
-
-sub new {
- shift;
- my $self = {};
- $self->{comments} = {}; # type -> comment
- $self->{fields} = {}; # type -> { name -> StructField, ... }
- $self->{order} = {};
-
- bless $self;
-}
-
-sub selectType {
- my $self = shift;
- my $type = shift;
- $self->{type} = $type;
- 1;
-}
-
-sub addType {
- my $self = shift;
- my $type = shift;
- my $comment = shift;
- $self->selectType($type);
- $self->{comments}->{$type} = $comment;
- $self->{fields}->{$type} = {};
- 1;
-}
-
-sub selectField {
- my $self = shift;
- $self->{field} = shift;
- $self->getField;
-}
-
-sub addField {
- my $self = shift;
- my $field = shift;
-
- die "unselected type" unless $self->{type};
- $self->{fields}->{$self->{type}}->{$field->{name}} = $field;
- $self->selectField($field->{name});
- 1;
-}
-
-sub getField {
- my $self = shift;
- my $f = $self->{fields}->{$self->{type}}->{$self->{field}};
- return $f ? $f : $self->{fields}->{_common_}->{$self->{field}};
-}
-
-sub load {
- my $self = shift;
- my $fh = shift;
- local $_;
-
- while ($_ = <$fh>) {
-
- chomp;
- s/#.*$//;
- next if /^\s*$/;
-
- if (/^\@type\s+(\S+)\s*(.*$)$/) {
- $self->addType($1,$2);
- $self->{order}->{$1} = $.;
- next;
- }
-
- s/^\s*//;
- my ($ftype,$fname,$comment) = split /\s+/,$_,3;
- if ($ftype eq '_code_') {
- my $f = $self->getField();
- addCode $f $fname,$comment;
- }
- elsif ($ftype eq '_alias_') {
- my $f = $self->getField();
- addAlias $f $fname,$comment;
- }
- elsif ($ftype eq '_special_') {
- my $f = $self->getField();
- addSpecial $f $fname;
- }
- elsif ($ftype eq '_null_') {
- my $f = $self->getField();
- setNull $f $fname;
- }
- elsif ($ftype eq '_optional_') {
- my $f = $self->getField();
- $f->{optional} = 1;
- }
- elsif ($ftype eq '_index_') {
- my $f = $self->getField();
- $f->{index} = 1;
- }
- else {
- my $f = new StructField $fname,$ftype,$comment,$.;
- $self->addField($f);
- }
- }
-}
-
-sub getTypes {
- my $self = shift;
- my @out;
- local $_;
-
- for (keys %{$self->{fields}}) {
- push @out,$_ unless $_ eq '_common_';
- }
- @out;
-}
-
-sub getTypesOrdered {
- my $self = shift;
- my @names = getTypes $self;
-
- sort {
- my $oa = $self->{order}->{$a};
- my $ob = $self->{order}->{$b};
- $oa <=> $ob;
- } @names;
-}
-
-sub getTypeComment {
- my $self = shift;
- my $type = shift || $self->{type};
- $self->{comments}->{$type};
-}
-
-sub getFieldComment {
- my $self = shift;
- my $fname = shift;
- $self->{fields}->{$self->{type}}->{$fname}->{comment};
-}
-
-sub getFields {
- my $self = shift;
- keys %{$self->{fields}->{$self->{type}}};
-}
-
-sub getFieldsOrdered {
- my $self = shift;
- my @names = $self->getFields;
- sort {
- my $oa = $self->selectField($a)->{order};
- my $ob = $self->selectField($b)->{order};
- $oa <=> $ob;
- } @names;
-}
-
-sub getFieldOccurence {
- my $self = shift;
- my $fname = shift;
- my @out;
- local $_;
-
- for (keys %{$self->{fields}}) {
- push @out,$_ if $self->{fields}->{$_}->{$fname};
- }
- @out;
-}
-
-sub getAllFields {
- my $self = shift;
- my %out;
- local $_;
-
- for my $t (values %{$self->{fields}}) {
- $out{$_->{name}} = 1 for (values %$t);
- }
- keys %out;
-}
-
-sub getAllFieldsOrdered {
- my $self = shift;
- my @names = getAllFields $self;
-
- sort {
- my @occ = $self->getFieldOccurence($a);
- $self->selectType($occ[0]);
- my $oa = $self->selectField($a)->{order};
- @occ = $self->getFieldOccurence($b);
- $self->selectType($occ[0]);
- my $ob = $self->selectField($b)->{order};
- $oa <=> $ob;
- } @names;
-}
-
-1;
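The module above is the data model consumed by the at3 template processor reproduced later in this dump: it parses the *.T definition files (such as events.T, shown at the end) into typed, ordered field lists. A minimal usage sketch follows; it is illustrative only, not part of the repository, and assumes MultiStruct.pm and StructField.pm are reachable via @INC and that an events.T file sits in the working directory:

    #!/usr/bin/perl
    # Illustrative sketch only -- not part of the original component.
    use strict;
    use lib '.';        # assumption: MultiStruct.pm and StructField.pm live here
    use MultiStruct;

    my $events = new MultiStruct;
    open my $fh, '<', 'events.T' or die "events.T: $!\n";
    $events->load($fh);     # parses @type / field / _code_ / _alias_ directives
    close $fh;

    # Walk the event types and their fields in declaration order.
    for my $type ($events->getTypesOrdered) {
        $events->selectType($type);
        print "$type: ", ($events->getTypeComment($type) || ''), "\n";
        for my $fname ($events->getFieldsOrdered) {
            my $f = $events->selectField($fname);
            printf "    %-8s %-16s %s\n",
                   $f->{type}, $f->getName('C'), ($f->getComment || '');
        }
    }

Note that at3 additionally requires a types.T file to populate the per-language conversion tables consulted by StructField (toString, fromString, compare); the sketch above only walks the parsed structure, so it works without them.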
+++ /dev/null
-package StructField;
-
-$lang = 'C';
-1;
-
-sub new {
- shift;
- my $self = {};
- $self->{name} = shift;
- $self->{type} = shift;
- $self->{comment} = shift;
- $self->{order} = shift;
- $self->{null} = $main::DefaultNullValue{$self->{type}};
- bless $self;
-}
-
-sub addCode {
- my $self = shift;
- my $code = shift;
- my $comment = shift;
- push @{$self->{codes}},{name=>$code,comment=>$comment};
- 1;
-}
-
-sub addSpecial {
- my $self = shift;
- my $special = shift;
- $self->{special} = $special;
- 1;
-}
-
-sub addAlias {
- my $self = shift;
- my $name = shift;
- my $lang = shift;
- $self->{aliases}->{$lang} = $name;
- 1;
-}
-
-sub hasAlias {
- my $self = shift;
- my $lang = shift;
- return $self->{aliases}->{$lang} ? 1 : 0;
-}
-
-sub getName {
- my $self = shift;
- my $lang = shift || $lang;
- $self->{aliases}->{$lang} || $self->{name};
-# return $self->{aliases}->{$lang} ? $self->{aliases}->{$lang} : $self->{name};
-}
-
-sub getComment {
- my $self = shift;
- $self->{comment};
-}
-
-sub getDefaultNullValue {
- my $self = shift;
- $self->{null};
-}
-
-sub toString {
- my $self = shift;
- my $src = shift;
- my $dst = shift;
-
- eval $main::toString{$lang}->{$self->{type}};
-}
-
-sub fromString {
- my $self = shift;
- my $src = shift;
- my $dst = shift;
-
- eval $main::fromString{$lang}->{$self->{type}};
-}
-
-sub isNULL {
- my $self = shift;
- my $a = shift;
- my $b = $self->{null};
-
- eval $main::compare{$lang}->{$self->{type}};
-}
-
-sub isnotNULL {
- my $self = shift;
- my $src = shift;
-
- '!('.$self->isNULL($src).')';
-}
-
-sub compare {
- my $self = shift;
- my $a = shift;
- my $b = shift;
- eval $main::compare{$lang}->{$self->{type}};
-}
-
-sub toFormatString {
- my $self = shift;
-
- eval $main::toFormatString{$lang}->{$self->{type}};
-}
-
-sub setNull {
- my $self = shift;
- $self->{null} = shift;
-}
-
-sub getType {
- my $self = shift;
-
- eval $main::types{$lang}->{$self->{type}};
-}
+++ /dev/null
-#!/usr/bin/perl -w
-
-use File::Basename;
-my $dir;
-BEGIN{
- $dir = dirname $0;
-}
-
-my $lines = $ENV{AT3_LINES};
-
-use lib $dir;
-use MultiStruct;
-require 'types.T';
-
-my $eventsn;
-for (@INC) {
- if (-f "$_/events.T") {
- $eventsn="$_/events.T";
- last;
- }
-}
-
-my $statusn;
-for (@INC) {
- if (-f "$_/status.T") {
- $statusn = "$_/status.T";
- last;
- }
-}
-
-my $indent = '';
-
-my $event = new MultiStruct;
-my $status = new MultiStruct;
-
-sub gen {
- local $_ = shift;
-
- s/^\n!//;
- s/\n!/\n/g;
- print $_;
-}
-
-
-open EVENTS,$eventsn or die "$eventsn: $!\n";
-$event->load(\*EVENTS);
-close EVENTS;
-
-open STATUS,$statusn or die "$statusn: $!\n";
-$status->load(\*STATUS);
-close STATUS;
-
-my $code;
-my $startcode;
-while (<>) {
- chomp;
- if (/^\@\@\@LANG: (\S+)$/) {
- $StructField::lang = $1;
- next;
- }
-
- if ($code) {
- if (/^\@\@\@}$/) {
- $code .= "1;\n";
- print "#line $startcode \"$ARGV\"\n/* begin */\n" if $lines;
- eval $code or warn "eval: $@ at $ARGV:$.\n";
- my $nxtline = $.+1;
- print "/* end */\n#line $nxtline \"$ARGV\"\n" if $lines;
- undef $code;
- }
- else { $code .= $_."\n"; }
- }
- else {
- if (/^\@\@\@{$/) {
- $startcode = $.;
- $code = "\n";
- }
- elsif (/^\@\@\@AUTO$/) {
- print qq{
- !! Automatically generated file
- !! Do not edit, your changes will be discarded upon build
- !! Change the corresponding template file $ARGV
-
-};
- print "#line $. \"$ARGV\"\n" if $lines;
- }
- else {
- print "$_\n";
- }
- }
-}
-
-# print $event_common{prog}->copy('bla','hu');
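For context, here is a hypothetical template fragment in the form this script expects (it is not taken from the repository). Plain lines are copied through unchanged, @@@LANG: selects the target language for StructField, @@@AUTO expands to the generated-file banner, and everything between @@@{ and @@@} is evaluated as Perl with the $event and $status MultiStruct objects in scope. Output is produced through gen(); gen() also strips a '!' placed right after a newline, which real templates use to protect indentation, but the sketch below just passes a plain string:

@@@LANG: C
@@@AUTO
/* hand-written text outside the markers is copied through verbatim */
@@@{
# Perl evaluated by at3: emit one comment line per event type.
for my $t ($event->getTypesOrdered) {
    my $c = $event->getTypeComment($t) || '';
    gen "/* event type $t -- $c */\n";
}
@@@}

Such a template would be processed with something like "perl at3 mytemplate.c.T > mytemplate.c" (file names hypothetical); setting the AT3_LINES environment variable additionally makes the script emit #line directives pointing back at the template.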
+++ /dev/null
-#Mon Apr 03 07:53:40 CEST 2006
-module.build=0241
+++ /dev/null
-ext.gsoap.version=2.7.0
+++ /dev/null
-
-###################################################################
-# System dependencies
-###################################################################
-
-org.glite.version = connpool_branch
-org.glite.lb.version = connpool_branch
-
-# Component dependencies tag = do not remove this line =
-org.glite.lb.client-interface.version = connpool_branch
-org.glite.lb.ws-interface.version = connpool_branch
-org.glite.lb.common.version = connpool_branch
-org.glite.lb.client.version = connpool_branch
-org.glite.lb.server.version = connpool_branch
-org.glite.lb.proxy.version = connpool_branch
-org.glite.lb.server-bones.version = connpool_branch
-org.glite.lb.logger.version = connpool_branch
-org.glite.lb.utils.version = connpool_branch
-
-ext.gsoap.version = 2.7.0
+++ /dev/null
-@type _common_
- timeval timestamp Time the event was generated.
- _alias_ date ULM
- timeval arrived Time the event was stored into the bookkeeping server database.
- _alias_ arr_date ULM
- _optional_
- string host Hostname of the machine where the event was generated.
- _alias_ host ULM
- int level Logging level (in the range from DEBUG to EMERGENCY).
- _alias_ lvl ULM
- _code_ EMERGENCY emergency
- _code_ ALERT alert
- _code_ ERROR error
- _code_ WARNING warning
- _code_ AUTH authentication
- _code_ SECURITY security
- _code_ USAGE usage
- _code_ SYSTEM system
- _code_ IMPORTANT important
- _code_ DEBUG debug
- int priority Message priority (currently 0 for asynchronous and 1 for synchronous transfers).
- _null_ -1
- jobid jobId Grid job id of the job the event belongs to.
- string seqcode Sequence code assigned to the event.
- string user Identity (certificate subject) of the event sender.
- logsrc source Source (software component) which generated this event.
-# string prog name of program ("EDG WMS" of name of the application).
- string src_instance Instance of source component (e.g. service communication endpoint).
- _optional_
-
-@type Transfer Start, success, or failure of job transfer to another component.
- logsrc destination Destination where the job is being transferred to.
- string dest_host Hostname of server that takes over control of the job.
- string dest_instance Service (instance) that takes over control of the job.
- _optional_
- string job Job description in receiver's language.
- int result Result code of the transfer attempt (START, OK, REFUSED or FAIL).
- _code_ START The sending component has started or is about to start the transfer.
- _code_ OK The job was sent successfully.
- _code_ REFUSED The job was refused by the other component.
-  _code_ FAIL The transfer failed for a reason other than explicit refusal (e.g. network timeout).
- string reason Detailed description of the transfer, especially reason of failure.
- _optional_
- string dest_jobid Job id as assigned by the receiving software component.
- _optional_
-
-@type Accepted Accepting job (successful counterpart to Transfer).
- logsrc from The software component the job was received from.
- string from_host Hostname of the component the job was received from.
- string from_instance Instance of the component the job was received from.
- _optional_
- string local_jobid New job id as assigned by the receiving component.
-
-@type Refused Refusing job (unsuccessful counterpart to Transfer).
- logsrc from The software component that tried to send the job.
- string from_host Hostname of the component that tried to send the job.
- string from_instance Instance of the component that tried to send the job.
- _optional_
- string reason Description of the reason why the job was refused.
-
-@type EnQueued The job has been enqueued in an inter-component queue.
- string queue Queue into which the job has been stored for retrieval by another component.
- string job Job description in the receiver's language.
- int result Result code of the attempt to put job into the queue (START, OK, REFUSED or FAIL).
- _code_ START The sending component has started or is about to start enqueuing the job.
- _code_ OK The job was enqueued successfully.
- _code_ REFUSED The job was refused by the other component.
-  _code_ FAIL The transfer failed for a reason other than explicit refusal.
- string reason Detailed description of the attempt to enqueue the job, especially the reason of failure.
-
-@type DeQueued The job has been dequeued from an inter-component queue.
- string queue Name of the queue the job was obtained from.
- string local_jobid New job id as assigned by the retrieving component.
-
-@type HelperCall Helper component is called.
- string helper_name Name of the called helper component.
- string helper_params Parameters of the call to the helper component.
- int src_role The role the event sender is playing in the helper call (CALLING or CALLED).
- _code_ CALLING The logging component is caller.
- _code_ CALLED The logging component is callee.
-
-@type HelperReturn Helper component is returning the control.
- string helper_name Name of the called helper component.
- string retval Data returned by the call to the helper component.
- int src_role The role the event sender is playing in the helper call (CALLING or CALLED).
- _code_ CALLING The logging component is caller.
- _code_ CALLED The logging component is callee.
-
-@type Running Job wrapper started.
- string node Worker node on which the job executable is being run.
-
-@type Resubmission Result of resubmission decision.
- int result Result code of the resubmission decision (WILLRESUB or WONTRESUB or SHALLOW).
- _code_ WILLRESUB The job will be resubmitted (deep resubmission).
- _code_ WONTRESUB The job will not be resubmitted.
- _code_ SHALLOW Shallow resubmission (user payload has not started yet)
- string reason Reason why the job will or will not be resubmitted.
- string tag Value of the attribute on which the decision to resubmit the job was based.
-
-@type Done Execution terminated (normally or abnormally).
- int status_code Reason code for the termination of the job (OK, FAILED or CANCELLED).
- _code_ OK The job terminated by itself.
- _code_ FAILED The job disappeared from LRMS.
- _code_ CANCELLED The job was cancelled by user request.
- string reason Detailed description why the job was terminated.
- int exit_code Exit code of the job's process.
- _null_ -1
-
-@type Cancel Cancel operation has been attempted on the job.
- int status_code Classification of the attempt to cancel the job (REQ, REFUSE, DONE or ABORT).
- _code_ REQ The request was acknowledged.
- _code_ REFUSE The request was declined by this component.
- _code_ DONE The request was completed by whole WMS.
- _code_ ABORT The request was refused by whole WMS.
- string reason Detailed description of the attempt to cancel the job, especially the reason of failure.
-
-@type Abort Job aborted by system.
- string reason Reason why the job was aborted by the system.
-
-@type Clear Job cleared, output sandbox removed.
- int reason Description of the reason why the job was cleared and the output sandbox removed (USER, TIMEOUT or NOOUTPUT).
- _code_ USER User retrieved output sandbox.
- _code_ TIMEOUT Timed out, resource forced purge of the sandbox.
- _code_ NOOUTPUT No output was generated.
-
-@type Purge Job is purged from bookkeeping server.
-
-@type Match Matching CE found.
- string dest_id Identification of the queue on the CE that the job could be sent to.
-
-@type Pending No matching CE found yet.
- string reason Description why the matching CE for the job was not found (yet).
-
-@type RegJob New job registration.
- string jdl Job description of the job being registered.
- string ns NetworkServer handling the newly registered job.
- jobid parent Grid job id of the parent job registering this new one.
- _optional_
-
- int jobtype Type of the job being registered (SIMPLE, DAG, PARTITIONABLE or PARTITIONED).
-  _code_ SIMPLE The job is a simple job.
-  _code_ DAG The job is a dag (containing a static set of subjobs).
- _code_ PARTITIONABLE The job is partitionable (may become partitioned).
- _code_ PARTITIONED The job is partitioned (dynamically created dag).
-
- int nsubjobs Number of subjobs this job plans to spawn.
- _optional_
- string seed Seed for subjob id generation.
- _optional_
-
-@type Chkpt Application-specific checkpoint record.
- string tag Application specific checkpoint tag.
- string classad Application specific checkpoint value.
-
-@type Listener Listening network port for interactive control.
- string svc_name Name of the port instance for interactive job control.
- string svc_host Hostname of the interactive job controller.
- port svc_port Port number of the interactive job controller.
-
-@type CurDescr Current state of job processing (optional event).
- string descr Description of the current job transformation (output of the helper).
-
-@type UserTag User tag -- arbitrary name=value pair.
- string name Arbitrary user tag name.
- string value Arbitrary user tag value.
-
-@type ChangeACL Management of ACL stored on bookkeeping server.
- string user_id DN or VOMS parameter (in format VO:group).
- int user_id_type Type of information given in user_id (DN or VOMS).
- _null_ -1
- int permission ACL permission to change (currently only READ).
- _null_ -1
- int permission_type Type of permission requested ('allow', 'deny').
- _null_ -1
- int operation Operation requested to perform with ACL (add, remove).
- _null_ -1
-
-@type Notification Management of notification service.
- notifid notifId Notification id.
- string owner Identification of the job owner (certificate subject).
- string dest_host Hostname the notification is sent to.
- port dest_port Port number the notification is sent to.
- string jobstat Status of the job (the notification content).
-
-
-@type ResourceUsage Resource (CPU, memory, etc.) consumption.
- string resource The resource being consumed.
- int quantity Amount of the resource consumed.
- string unit Unit of the quantity (sec, kB, etc.).
-
-@type ReallyRunning User payload started.
-  _optional_
- string wn_seq Sequence code on the worker node.
-
-@type Suspend Job execution (queuing) was suspended.
-  _optional_
- string reason Reason for suspending the job.
-
-@type Resume Job execution (queuing) was resumed.
-  _optional_
- string reason Reason for resuming the job.
+++ /dev/null
-<?xml version="1.0"?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- gLite Middleware Logging and Bookkeeping Configuration Specification File
-
- Authors: Alberto Di Meglio <alberto.di.meglio@cern.ch>
- Joachim Flammer <Joachim.Flammer@cern.ch>
- Ales Krenek <ljocha@ics.muni.cz>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
- Revision 1.16 2006/03/15 17:33:24 akrenek
- merge of 1.5 branch
-
- Revision 1.15 2005/11/27 21:51:18 eronchie
- Applied fix for bug 13928
-
- Revision 1.14 2005/10/15 20:14:55 jpospi
- remove duplicate lb.utils section
-
- Revision 1.13.2.1.2.1 2005/11/28 10:39:08 akrenek
- merge bug #13928 fix on the pre_cares branch
-
- Revision 1.13.2.1 2005/10/17 16:27:48 akrenek
- merged in the duplicate utils targed patch
-
- Revision 1.13 2005/10/15 13:36:26 akrenek
- added lb.utils
-
- Revision 1.12 2005/03/16 10:02:52 zsalvet
- proxy component added
-
- Revision 1.11 2004/12/01 18:01:55 zsalvet
- LB here, not R-GMA
-
- Revision 1.10 2004/11/29 16:01:21 zsalvet
- Evaluate component.{head,tag} conditions before use of get.* targets.
-
- Revision 1.9 2004/11/29 15:16:26 zsalvet
- Add ws-interface to checkout/build machinery.
-
- Revision 1.8 2004/10/18 22:55:14 dimeglio
- Added oscheck to various targets
-
- Revision 1.7 2004/10/15 12:30:01 akrenek
- build with lb.sever-bones
-
- Revision 1.6 2004/08/27 10:03:41 zurek
- typo problem
-
- Revision 1.5 2004/08/27 09:13:11 zurek
- changing thee order for build
-
- Revision 1.4 2004/07/29 10:47:46 dimeglio
- Added server and logger
-
- Revision 1.3 2004/07/16 16:25:43 flammer
- Added tags for automated adding of components.
-
- Revision 1.2 2004/06/23 00:22:11 dimeglio
- Added client and client-interface components
-
- Revision 1.1 2004/06/18 23:05:53 dimeglio
- Added/upgraded default build scripts
-
--->
-
-
-<project name="GLite Middleware Logging and Bookkeeping CSF" default="all">
-
- <!-- ===============================================
- Determine workspace directory
- =============================================== -->
-
- <!-- Relative workspace root directory -->
- <property name="workspace.dir" value="../.." />
-
- <!-- ===============================================
- Load properties
- =============================================== -->
-
- <!-- load baseline and user properties -->
- <import file="${workspace.dir}/org.glite/project/baseline.properties.xml" />
-
- <!-- define build properties file location since we are already in project dir -->
- <property name="subsystem.build.properties.file" value="./build.properties" />
-
- <!-- Load subsystem-specific property file -->
- <property file="./properties.xml"/>
-
- <!-- load global properties -->
- <import file="${global.properties.file}" />
-
- <!-- ===============================================
- Load dependencies
- =============================================== -->
-
- <!-- Load user dependencies file -->
- <property file="${user.dependencies.file}" />
-
- <!-- Load subsystem dependencies file -->
- <property file="./dependencies.properties" />
-
- <!-- Load global dependencies file -->
- <property file="${global.dependencies.file}" />
-
- <!-- ===============================================
- Load targets
- =============================================== -->
- <import file="${global.targets-envchecks.file}" />
- <import file="${global.targets-external-dependencies.file}" />
-
- <!-- ===============================================
- Evaluate CVS tags
- =============================================== -->
-
- <target name="evaluate.cvs.tags" description="Figure out if we need tagged CVS checkout">
- <condition property="glite.head">
- <and>
- <equals arg1="${org.glite.version}" arg2="HEAD" />
- <or>
- <istrue value="${update}" />
- <not>
- <available file="${global.dependencies.file}" type="file" />
- </not>
- </or>
- </and>
- </condition>
- <condition property="glite.tag">
- <and>
- <not>
- <equals arg1="${org.glite.version}" arg2="HEAD" />
- </not>
- <or>
- <istrue value="${update}" />
- <not>
- <available file="${global.dependencies.file}" type="file" />
- </not>
- </or>
- </and>
- </condition>
- <condition property="glite-lb.head">
- <and>
- <equals arg1="${org.glite.lb.version}" arg2="HEAD" />
- <istrue value="${update}" />
- </and>
- </condition>
- <condition property="glite-lb.tag">
- <and>
- <not>
- <equals arg1="${org.glite.lb.version}" arg2="HEAD" />
- </not>
- <istrue value="${update}" />
- </and>
- </condition>
-
- <!-- condition property tag = do not remove = -->
-
- <condition property="utils.head">
- <equals arg1="${org.glite.lb.utils.version}" arg2="HEAD" />
- </condition>
-
- <condition property="common.head">
- <equals arg1="${org.glite.lb.common.version}" arg2="HEAD" />
- </condition>
-
- <condition property="server-bones.head">
- <equals arg1="${org.glite.lb.server-bones.version}" arg2="HEAD" />
- </condition>
-
- <condition property="client-interface.head">
- <equals arg1="${org.glite.lb.client-interface.version}" arg2="HEAD" />
- </condition>
-
- <condition property="ws-interface.head">
- <equals arg1="${org.glite.lb.ws-interface.version}" arg2="HEAD" />
- </condition>
-
- <condition property="client.head">
- <equals arg1="${org.glite.lb.client.version}" arg2="HEAD" />
- </condition>
-
- <condition property="server.head">
- <equals arg1="${org.glite.lb.server.version}" arg2="HEAD" />
- </condition>
-
- <condition property="proxy.head">
- <equals arg1="${org.glite.lb.proxy.version}" arg2="HEAD" />
- </condition>
-
- <condition property="logger.head">
- <equals arg1="${org.glite.lb.logger.version}" arg2="HEAD" />
- </condition>
-
- </target>
-
- <presetdef name="cvs-co">
- <cvs command="checkout" dest="${workspace.dir}" />
- </presetdef>
-
- <!-- =====================================================
- Self-update if required
- ===================================================== -->
-
- <!-- Update main GLite module -->
- <target name="org.glite" depends="evaluate.cvs.tags, get.glite.head, get.glite.tag"/>
- <target name="get.glite.head" if="glite.head">
- <cvs-co package="org.glite" />
- </target>
- <target name="get.glite.tag" if="glite.tag">
- <cvs-co package="org.glite"
- tag="${org.glite.version}" />
- </target>
-
- <!-- Update the current module -->
- <target name="org.glite.lb" depends="evaluate.cvs.tags, get.glite-lb.head, get.glite-lb.tag"/>
- <target name="get.glite-lb.head" if="glite-lb.head">
- <cvs-co package="org.glite.lb" />
- <fail>The org.glite and org.glite.lb modules have been updated, please rerun the configuration file</fail>
- </target>
- <target name="get.glite-lb.tag" if="glite-lb.tag">
- <cvs-co package="org.glite.lb"
- tag="${org.glite.lb.version}" />
- <fail>The org.glite and org.glite.lb modules have been updated, please rerun the configuration file</fail>
- </target>
-
- <!-- *****************************************************-->
- <!-- Development tools -->
- <!-- *****************************************************-->
-
- <!-- All development tools -->
- <target name="devtools" depends="oscheck,
- evaluate.cvs.tags,
- junitcheck,
- junit,
- chkstyle,
- jalopy,
- ant-contrib,
- cpptasks,
- egee-ant-ext"
- description="Get development tools into repository."/>
-
- <!-- =====================================================
- External libraries
- ===================================================== -->
-
- <!-- All external libraries -->
- <target name="external" depends="oscheck,
- evaluate.cvs.tags,
- classads,
- globus"
- description="Install external packages"/>
-
- <!-- =====================================================
- GLite Middleware LB modules
- ===================================================== -->
-
- <!-- component targets tag = do not remove = -->
-
- <!-- common -->
- <target name="common" depends="evaluate.cvs.tags, get.common.head, get.common.tag"/>
- <target name="get.common.head" if="common.head">
- <cvs-co package="org.glite.lb.common" />
- </target>
- <target name="get.common.tag" unless="common.head">
- <cvs-co package="org.glite.lb.common"
- tag="${org.glite.lb.common.version}" />
- </target>
-
- <!-- server-bones -->
- <target name="server-bones" depends="evaluate.cvs.tags, get.server-bones.head, get.server-bones.tag"/>
- <target name="get.server-bones.head" if="server-bones.head">
- <cvs-co package="org.glite.lb.server-bones" />
- </target>
- <target name="get.server-bones.tag" unless="server-bones.head">
- <cvs-co package="org.glite.lb.server-bones"
- tag="${org.glite.lb.server-bones.version}" />
- </target>
-
- <!-- client-interface -->
- <target name="client-interface" depends="evaluate.cvs.tags, get.client-interface.head, get.client-interface.tag"/>
- <target name="get.client-interface.head" if="client-interface.head">
- <cvs-co package="org.glite.lb.client-interface" />
- </target>
- <target name="get.client-interface.tag" unless="client-interface.head">
- <cvs-co package="org.glite.lb.client-interface"
- tag="${org.glite.lb.client-interface.version}" />
- </target>
-
- <!-- WS interface -->
- <target name="ws-interface" depends="evaluate.cvs.tags, get.ws-interface.head, get.ws-interface.tag"/>
- <target name="get.ws-interface.head" if="ws-interface.head">
- <cvs-co package="org.glite.lb.ws-interface" />
- </target>
- <target name="get.ws-interface.tag" unless="ws-interface.head">
- <cvs-co package="org.glite.lb.ws-interface"
- tag="${org.glite.lb.ws-interface.version}" />
- </target>
-
- <!-- client -->
- <target name="client" depends="evaluate.cvs.tags, get.client.head, get.client.tag"/>
- <target name="get.client.head" if="client.head">
- <cvs-co package="org.glite.lb.client" />
- </target>
- <target name="get.client.tag" unless="client.head">
- <cvs-co package="org.glite.lb.client"
- tag="${org.glite.lb.client.version}" />
- </target>
-
- <!-- server -->
- <target name="server" depends="evaluate.cvs.tags, get.server.head, get.server.tag"/>
- <target name="get.server.head" if="server.head">
- <cvs-co package="org.glite.lb.server" />
- </target>
- <target name="get.server.tag" unless="server.head">
- <cvs-co package="org.glite.lb.server"
- tag="${org.glite.lb.server.version}" />
- </target>
-
- <!-- proxy -->
- <target name="proxy" depends="evaluate.cvs.tags, get.proxy.head, get.proxy.tag"/>
- <target name="get.proxy.head" if="proxy.head">
- <cvs-co package="org.glite.lb.proxy" />
- </target>
- <target name="get.proxy.tag" unless="proxy.head">
- <cvs-co package="org.glite.lb.proxy"
- tag="${org.glite.lb.proxy.version}" />
- </target>
-
- <!-- logger -->
- <target name="logger" depends="evaluate.cvs.tags, get.logger.head, get.logger.tag"/>
- <target name="get.logger.head" if="logger.head">
- <cvs-co package="org.glite.lb.logger" />
- </target>
- <target name="get.logger.tag" unless="logger.head">
- <cvs-co package="org.glite.lb.logger"
- tag="${org.glite.lb.logger.version}" />
- </target>
-
- <!-- utils -->
- <target name="utils" depends="get.utils.head, get.utils.tag"/>
- <target name="get.utils.head" if="utils.head">
- <cvs-co package="org.glite.lb.utils" />
- </target>
- <target name="get.utils.tag" unless="utils.head">
- <cvs-co package="org.glite.lb.utils"
- tag="${org.glite.lb.utils.version}" />
- </target>
-
- <!-- All project modules -->
- <target name="project" depends="utils,
- client-interface,
- ws-interface,
- common,
- server-bones,
- client,
- server,
- proxy,
- logger"/>
-
-
- <!-- ====================================================
- Checkout all
- ==================================================== -->
-
- <!-- All libraries -->
- <target name="all" depends="oscheck,evaluate.cvs.tags, defaultenvchecks, org.glite, org.glite.lb, devtools, external, project" />
-
- <!-- ====================================================
- Print dependencies to console
- ==================================================== -->
-
- <target name="dependencies">
- <concat>
- <fileset dir="." includes="dependencies.properties" />
- </concat>
- </target>
-
-</project>
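As a usage sketch grounded in the property-file loads and conditions above (the exact value is only an example): overriding a single component version in dependencies.properties, or in the user dependencies file, is what flips the corresponding condition from the get.*.tag to the get.*.head checkout target, e.g.

# in ./dependencies.properties (or ${user.dependencies.file})
org.glite.lb.common.version = HEAD

With this setting, evaluate.cvs.tags sets the common.head property, so get.common.head checks out org.glite.lb.common from CVS HEAD, while get.common.tag (which runs unless="common.head") is skipped.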
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Common build properties file for the gLite LB modules
-
- Authors: Ales Krenek <ljocha@ics.muni.cz>
- Alberto Di Meglio <alberto.di.meglio@cern.ch>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
--->
-
-<project name="Logging and Bookkeeping subsystem common properties">
-
- <!-- Include build properties to allow overwriting
- of properties for subsystem -->
- <property name="subsystem.build.properties.file" value="./project/build.properties" />
- <property file="${subsystem.build.properties.file}" />
-
- <!-- ======================================================
- Define subsystem properties
- ====================================================== -->
-
- <!-- Subsystem name -->
- <property name="subsystem.name" value="${lb.subsystem.name}"/>
-
- <!-- Subsystem prefix -->
- <property name="subsystem.prefix" value="${lb.subsystem.prefix}"/>
-
- <!-- ======================================================
- Define general subsystem properties
- ====================================================== -->
-
- <!-- Include common subsystem properties -->
- <import file="${subsystem.general.properties.file}" />
-
- <!-- ======================================================
- Define extra properties here ...
- ====================================================== -->
-
-
-</project>
\ No newline at end of file
+++ /dev/null
-#!/bin/sh
-
-cd ../..
-
-cvs co org.glite
-cvs co org.glite.lb
-
-cd org.glite.lb/project
-ant -f glite.lb.csf.xml
-
+++ /dev/null
-@type _common_
-jobid jobId Id of the job
-string owner Job owner
-_index_
-
-int jobtype Type of job
- _null_ -1
- _code_ SIMPLE simple job
- _code_ DAG composite job
-jobid parent_job parent job of subjob
-
-string seed string used for generation of subjob IDs
-int children_num number of subjobs
-strlist children list of subjob IDs
- _special_ XMLstructured
-intlist children_hist summary (histogram) of children job states
- _special_ XMLstructured
-stslist children_states full status information of the children
- _special_ XMLstructured
-
-string condorId Id within Condor-G
-string globusId Globus allocated Id
-string localId Id within LRMS
-
-string jdl User submitted job description
-string matched_jdl Full job description after matchmaking
-string destination ID of CE where the job is being sent
-_index_
-string condor_jdl ClassAd passed to Condor-G for last job execution
-string rsl Job RSL sent to Globus
-
-string reason Reason of being in this status, if any
-
-string location Where the job is being processed
-_index_
-string ce_node Worker node where the job is executed
-string network_server Network server handling the job
-
-bool subjob_failed Subjob failed (the parent job will fail too)
-int done_code Return code
- _null_ -1
- _code_ OK Finished correctly
- _code_ FAILED Execution failed
- _code_ CANCELLED Cancelled by user
-int exit_code Unix exit code
-bool resubmitted The job was resubmitted
-
-bool cancelling Cancellation request in progress
-string cancelReason Reason of cancel
-
-int cpuTime Consumed CPU time
- _null_ -1
-
-taglist user_tags List of pairs (user_tag, user_value)
- _special_ XMLstructured
-
-timeval stateEnterTime When the job entered this status
-timeval lastUpdateTime Last known event of the job
-
-intlist stateEnterTimes When all previous states were entered
- _special_ XMLstructured
-
-bool expectUpdate Some logged information has not arrived yet
-string expectFrom Sources of the missing information
-string acl ACL of the job
-
-bool payload_running User payload started
-strlist possible_destinations Possible job destinations
- _special_ XMLstructured
-strlist possible_ce_nodes CE nodes matching to possible_destinations
- _special_ XMLstructured
-
-bool suspended Job is suspended
-string suspend_reason Reason for suspending the job
-
-@type Submitted Entered by the user to the User Interface or registered by the Job Partitioner
-@type Waiting Accepted by WMS, waiting for resource allocation
-@type Ready Matching resources found
-@type Scheduled Accepted by LRMS queue
-@type Running Executable is running
-@type Done Execution finished, output is available
-@type Cleared Output transferred back to user and freed
-@type Aborted Aborted by system (at any stage)
-@type Cancelled Cancelled by user
-@type Unknown Status cannot be determined
-@type Purged Job has been purged from bookkeeping server (for LB->RGMA interface)
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2004 on behalf of the EU EGEE Project:
- The European Organization for Nuclear Research (CERN),
- Istituto Nazionale di Fisica Nucleare (INFN), Italy
- Datamat Spa, Italy
- Centre National de la Recherche Scientifique (CNRS), France
- CS Systeme d'Information (CSSI), France
- Royal Institute of Technology, Center for Parallel Computers (KTH-PDC), Sweden
- Universiteit van Amsterdam (UvA), Netherlands
- University of Helsinki (UH.HIP), Finland
- University of Bergen (UiB), Norway
- Council for the Central Laboratory of the Research Councils (CCLRC), United Kingdom
-
- Common Ant task definition file for the gLite Logging and Bookkeeping modules
-
- Authors: Ales Krenek <ljocha@ics.muni.cz>
- Alberto Di Meglio <alberto.di.meglio@cern.ch>
- Version info: $Id$
- Release: $Name$
-
- Revision history:
- $Log$
--->
-
-<project name="Logging and Bookkeeping subsystem common tasks and types definitions">
-
- <!-- ======================================================
- Subsystem task definitions
- ====================================================== -->
-
-</project>
\ No newline at end of file
+++ /dev/null
-%types = (
- C=>{
- bool=>'"int"',
- string=>'"char *"',
- strlist=>'"char **"',
- intlist=>'"int *"',
- taglist=>'"edg_wll_TagValue *"',
- stslist=>'"struct _edg_wll_JobStat *"',
- timeval=>'"struct timeval"',
- jobid=>'"edg_wlc_JobId"',
- notifid=>'"edg_wll_NotifId"',
- logsrc=>'"edg_wll_Source"',
- port=>'"uint16_t"',
-# level=>'"enum edg_wll_Level"',
- int=>'"int"'
- },
- 'C++'=>{
- string=>'"std::string"',
- timeval=>'"struct timeval"',
- jobid=>'"edg::workload::common::jobid::JobId"',
- bool=>'"int"',
- intlist=>'"std::vector<int>"',
- strlist=>'"std::vector<std::string>"',
- taglist=>'"std::vector<std::pair<std::string,std::string> >"',
- stslist=>'"std::vector<JobStatus>"',
- logsrc=>'"int"',
- port=>'"int"',
- int=>'"int"'
- },
- 'wsdl'=>{
- bool=>'"xsd:boolean"',
- string=>'"xsd:string"',
- int=>'"xsd:int"',
- jobid=>'"xsd:string"',
- jobstat=>'"jobStatus"',
- usertag=>'"tagValue"',
- timeval=>'"timeval"',
- logsrc=>'"eventSource"',
- notifid=>'"xsd:string"',
- port=>'"xsd:int"',
- }
-);
-
-%baseTypes = (
- intlist=>'int',
- strlist=>'string',
- stslist=>'jobstat',
- taglist=>'usertag'
-);
-
-%toString = (
- C=>{
- int=>'qq{asprintf(&$dst,"%d",$src);}',
- port=>'qq{asprintf(&$dst,"%d",(int) $src);}',
- bool=>'qq{asprintf(&$dst,"%d",$src);}',
- string=>'qq{$dst = $src?strdup($src):NULL;}',
- timeval=>'qq{edg_wll_ULMTimevalToDate(($src).tv_sec,($src).tv_usec,$dst);}',
- jobid=>'qq{$dst = edg_wlc_JobIdUnparse($src);}',
- notifid=>'qq{$dst = edg_wll_NotifIdUnparse($src);}',
-# level=>'qq{$dst = edg_wll_LevelToString($src);}',
- logsrc=>'qq{$dst = edg_wll_SourceToString($src);}',
-# strlist, intlist, stslist are used only in consumer API, they don't need toString method
- }
-);
-
-%ULMasString = (
- logsrc=>1
-);
-
-%fromString = (
- C=>{
- int=>'qq{$dst = atoi($src);}',
- port=>'qq{$dst = (uint16_t) atoi($src);}',
- bool=>'qq{$dst = atoi($src);}',
- string=>'qq{$dst = strdup($src);}',
- timeval=>'qq{edg_wll_ULMDateToTimeval($src,&$dst);}',
- jobid=>'qq{edg_wlc_JobIdParse($src,&$dst);}',
- notifid=>'qq{edg_wll_NotifIdParse($src,&$dst);}',
-# level=>'qq{$dst = edg_wll_StringToLevel($src);}',
- logsrc=>'qq{$dst = edg_wll_StringToSource($src);}',
-# strlist, intlist, stslist are used only in consumer API, they don't need fromString method
- }
-);
-
-%DefaultNullValue = (
- int=>0,
- port=>0,
-# level=>'EDG_WLL_LEVEL_UNDEFINED',
- bool=>0,
- string=>'NULL',
- jobid=>'NULL',
- notifid=>'NULL',
- logsrc=>'EDG_WLL_SOURCE_NONE',
- timeval=>'null_timeval',
- strlist=>'NULL',
- intlist=>'NULL',
- taglist=>'NULL',
- stslist=>'NULL',
-);
-
-%compare = (
- C=>{
- int=>'"($a == $b)"',
- port=>'"($a == $b)"',
-# level=>'"($a == $b)"',
- bool=>'"(($a || !$b) && ($b || !$a))"',
- string=>'"(($a) == NULL && ($b) == NULL) || (($a)&&($b)&& !strcmp($a,$b))"',
- jobid=>'"(($a) == NULL && ($b) == NULL) || (($a)&&($b)&& !strcmp(edg_wlc_JobIdUnparse($a),edg_wlc_JobIdUnparse($b)))"',
- notifid=>'"($a) == ($b)"',
- logsrc=>'"($a) == ($b)"',
- timeval=>'"($a).tv_sec == ($b).tv_sec && ($a).tv_usec == ($b).tv_usec"',
- }
-);
-
-%toFormatString = (
- C=>{
- int=>'"%d"',
- port=>'"%d"',
- bool=>'"%d"',
-# level=>'"%s"',
- string=>'"%|Us"',
- jobid=>'"%s"',
- notifid=>'"%s"',
- logsrc=>'"%s"',
- timeval=>'"%s"',
- }
-);
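The tables above, %toString and %fromString in particular, do not hold C code directly: each entry is a Perl string that, when eval'd, yields a qq{} template into which the generator interpolates the source and destination expressions it is currently working with. A minimal sketch of that expansion, assuming the tables have been loaded into the current package and using made-up $src/$dst values:

# illustrative only: expanding the C toString rule for a timeval field
my ($src, $dst) = ('e->timestamp', 'buf');  # example expressions, not real generator variables
my $stmt = eval $toString{C}{timeval};
# $stmt now holds the C statement:
#   edg_wll_ULMTimevalToDate((e->timestamp).tv_sec,(e->timestamp).tv_usec,buf);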
+++ /dev/null
-#Fri Sep 02 14:19:10 CEST 2005
-module.version=1.3.0
-module.age=0